/*
 * Copyright (c) 2006-2025 RT-Thread Development Team
 *
 * SPDX-License-Identifier: Apache-2.0
 *
 * Change Logs:
 * Date           Author       Notes
 * 2006-03-17     Bernard      the first version
 * 2006-04-28     Bernard      fix the scheduler algorithm
 * 2006-04-30     Bernard      add SCHEDULER_DEBUG
 * 2006-05-27     Bernard      fix the scheduler algorithm for same priority
 *                             thread schedule
 * 2006-06-04     Bernard      rewrite the scheduler algorithm
 * 2006-08-03     Bernard      add hook support
 * 2006-09-05     Bernard      add 32 priority level support
 * 2006-09-24     Bernard      add rt_system_scheduler_start function
 * 2009-09-16     Bernard      fix _rt_scheduler_stack_check
 * 2010-04-11     yi.qiu       add module feature
 * 2010-07-13     Bernard      fix the maximal number of rt_scheduler_lock_nest
 *                             issue found by kuronca
 * 2010-12-13     Bernard      add defunct list initialization even if heap is not used.
 * 2011-05-10     Bernard      clean scheduler debug log.
 * 2013-12-21     Grissiom     add rt_critical_level
 * 2018-11-22     Jesven       remove the current task from ready queue
 *                             add per cpu ready queue
 *                             add _scheduler_get_highest_priority_thread to find highest priority task
 *                             rt_schedule_insert_thread won't insert current task to ready queue
 *                             in smp version, rt_hw_context_switch_interrupt may switch to
 *                             new task directly
 * 2022-01-07     Gabriel      Moving __on_rt_xxxxx_hook to scheduler.c
 * 2023-03-27     rose_man     Split into scheduler_up.c and scheduler_mp.c
 * 2023-10-17     ChuShicheng  Modify the timing of clearing RT_THREAD_STAT_YIELD flag bits
 * 2025-08-04     Pillar       Add rt_scheduler_critical_switch_flag
 */

#define __RT_IPC_SOURCE__
#include <rtthread.h>
#include <rthw.h>

#define DBG_TAG           "kernel.scheduler"
#define DBG_LVL           DBG_INFO
#include <rtdbg.h>

rt_list_t rt_thread_priority_table[RT_THREAD_PRIORITY_MAX];
rt_uint32_t rt_thread_ready_priority_group;
#if RT_THREAD_PRIORITY_MAX > 32
/* Maximum priority level, 256 */
rt_uint8_t rt_thread_ready_table[32];
#endif /* RT_THREAD_PRIORITY_MAX > 32 */

extern volatile rt_atomic_t rt_interrupt_nest;
static rt_int16_t rt_scheduler_lock_nest;
rt_uint8_t rt_current_priority;

static rt_int8_t rt_scheduler_critical_switch_flag;
#define IS_CRITICAL_SWITCH_PEND()  (rt_scheduler_critical_switch_flag == 1)
#define SET_CRITICAL_SWITCH_FLAG() (rt_scheduler_critical_switch_flag = 1)
#define CLR_CRITICAL_SWITCH_FLAG() (rt_scheduler_critical_switch_flag = 0)

#if defined(RT_USING_HOOK) && defined(RT_HOOK_USING_FUNC_PTR)
static void (*rt_scheduler_hook)(struct rt_thread *from, struct rt_thread *to);
static void (*rt_scheduler_switch_hook)(struct rt_thread *tid);

/**
 * @addtogroup group_hook
 */

/**@{*/

/**
 * @brief This function will set a hook function, which will be invoked when a thread
 *        switch happens.
 *
 * @param hook is the hook function.
 */
void rt_scheduler_sethook(void (*hook)(struct rt_thread *from, struct rt_thread *to))
{
    rt_scheduler_hook = hook;
}

/**
 * @brief This function will set a hook function, which will be invoked when a context
 *        switch happens.
 *
 * @param hook is the hook function.
 */
void rt_scheduler_switch_sethook(void (*hook)(struct rt_thread *tid))
{
    rt_scheduler_switch_hook = hook;
}

/**@}*/
#endif /* RT_USING_HOOK */
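
/*
 * Example: registering the scheduler hook (a minimal sketch of hypothetical
 * application code; my_sched_hook and its message are illustrative only, and
 * printing from a hook is shown purely for demonstration, since hooks run in
 * scheduler context with interrupts disabled):
 *
 *     static void my_sched_hook(struct rt_thread *from, struct rt_thread *to)
 *     {
 *         rt_kprintf("switch: %s -> %s\n", from->parent.name, to->parent.name);
 *     }
 *
 *     rt_scheduler_sethook(my_sched_hook);
 *
 * rt_scheduler_switch_sethook() is registered the same way; its hook takes a
 * single thread pointer.
 */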

static struct rt_thread* _scheduler_get_highest_priority_thread(rt_ubase_t *highest_prio)
{
    struct rt_thread *highest_priority_thread;
    rt_ubase_t highest_ready_priority;

#if RT_THREAD_PRIORITY_MAX > 32
    rt_ubase_t number;

    number = __rt_ffs(rt_thread_ready_priority_group) - 1;
    highest_ready_priority = (number << 3) + __rt_ffs(rt_thread_ready_table[number]) - 1;
#else
    highest_ready_priority = __rt_ffs(rt_thread_ready_priority_group) - 1;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */

    /* get highest ready priority thread */
    highest_priority_thread = RT_THREAD_LIST_NODE_ENTRY(rt_thread_priority_table[highest_ready_priority].next);

    *highest_prio = highest_ready_priority;

    return highest_priority_thread;
}
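
/*
 * Worked example of the two-level lookup above (assuming RT_THREAD_PRIORITY_MAX
 * > 32 and that priority 37 is the highest ready priority): bit 4 of
 * rt_thread_ready_priority_group is set (37 >> 3 == 4) and bit 5 of
 * rt_thread_ready_table[4] is set (37 & 0x07 == 5), so:
 *
 *     number                 = __rt_ffs(rt_thread_ready_priority_group) - 1;  // 5 - 1 = 4
 *     highest_ready_priority = (number << 3)
 *                            + __rt_ffs(rt_thread_ready_table[number]) - 1;   // 32 + 6 - 1 = 37
 */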
116 
117 /**
118  * @brief Lock the scheduler and save the interrupt level
119  *
120  * @param plvl Pointer to store the interrupt level before locking
121  *
122  * @return rt_err_t
123  *   - RT_EOK on success
124  *   - -RT_EINVAL if plvl is NULL
125  *
126  * @details This function:
127  *   - Disables interrupts to prevent preemption
128  *   - Saves the previous interrupt level in plvl
129  *   - Must be paired with rt_sched_unlock() to restore interrupts
130  *
131  * @note The lock is implemented by disabling interrupts
132  *       Caller must ensure plvl is valid
133  */
rt_sched_lock(rt_sched_lock_level_t * plvl)134 rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl)
135 {
136     rt_base_t level;
137     if (!plvl)
138         return -RT_EINVAL;
139 
140     level = rt_hw_interrupt_disable();
141     *plvl = level;
142 
143     return RT_EOK;
144 }

/**
 * @brief Unlock the scheduler and restore the interrupt level
 *
 * @param level The interrupt level to restore (previously saved by rt_sched_lock)
 * @return rt_err_t Always returns RT_EOK
 *
 * @details This function:
 *   - Restores the interrupt level that was saved when locking the scheduler
 *   - Must be called to match each rt_sched_lock() call
 *
 * @note Must be called with the same interrupt level that was saved by rt_sched_lock().
 *       Should not be called without a corresponding rt_sched_lock() first.
 */
rt_err_t rt_sched_unlock(rt_sched_lock_level_t level)
{
    rt_hw_interrupt_enable(level);

    return RT_EOK;
}

/**
 * @brief Unlock the scheduler and trigger a reschedule if needed
 *
 * @param level The interrupt level to restore (previously saved by rt_sched_lock)
 * @return rt_err_t Always returns RT_EOK
 *
 * @details This function:
 *   - Restores the interrupt level that was saved when locking the scheduler
 *   - Triggers a reschedule if the scheduler is available (rt_thread_self() != NULL)
 *   - Combines the functionality of rt_sched_unlock() and rt_schedule()
 */
rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level)
{
    if (rt_thread_self())
    {
        /* if scheduler is available */
        rt_schedule();
    }
    rt_hw_interrupt_enable(level);

    return RT_EOK;
}
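
/*
 * Usage sketch for the scheduler lock API above (hypothetical caller code):
 *
 *     rt_sched_lock_level_t slvl;
 *
 *     rt_sched_lock(&slvl);      // interrupts disabled, level saved in slvl
 *     // ... manipulate scheduler-protected state here ...
 *     rt_sched_unlock(slvl);     // level restored, no reschedule
 *
 * If the protected section may have made a higher-priority thread ready,
 * finish with rt_sched_unlock_n_resched(slvl) instead of rt_sched_unlock(slvl).
 */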

/**
 * @brief Initialize the system scheduler for single-core systems
 *
 * @details This function performs the following initialization tasks:
 *   - Resets the scheduler lock nest counter to 0
 *   - Initializes the priority table for all priority levels
 *   - Clears the ready priority group bitmap
 *   - For systems with >32 priority levels, initializes the ready table
 *
 * @note This function must be called before any thread scheduling can occur.
 *       It prepares the scheduler data structures for single-core operation.
 */
void rt_system_scheduler_init(void)
{
    rt_base_t offset;
    rt_scheduler_lock_nest = 0;

    LOG_D("start scheduler: max priority 0x%02x",
          RT_THREAD_PRIORITY_MAX);

    for (offset = 0; offset < RT_THREAD_PRIORITY_MAX; offset ++)
    {
        rt_list_init(&rt_thread_priority_table[offset]);
    }

    /* initialize ready priority group */
    rt_thread_ready_priority_group = 0;

#if RT_THREAD_PRIORITY_MAX > 32
    /* initialize ready table */
    rt_memset(rt_thread_ready_table, 0, sizeof(rt_thread_ready_table));
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
}

/**
 * @brief Start the system scheduler and switch to the highest priority thread
 *
 * @details This function:
 *   - Gets the highest priority ready thread using _scheduler_get_highest_priority_thread()
 *   - Sets it as the current thread for the CPU
 *   - Removes the thread from the ready queue and sets its status to RUNNING
 *   - Performs a context switch to the selected thread using rt_hw_context_switch_to()
 *
 * @note This function does not return, as it switches to the first thread to run.
 *       Must be called after rt_system_scheduler_init().
 *       The selected thread will begin execution immediately.
 */
void rt_system_scheduler_start(void)
{
    struct rt_thread *to_thread;
    rt_ubase_t highest_ready_priority;

    to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);

    rt_cpu_self()->current_thread = to_thread;

    /* flush critical switch flag */
    CLR_CRITICAL_SWITCH_FLAG();

    rt_sched_remove_thread(to_thread);
    RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING;

    /* switch to new thread */
    rt_hw_context_switch_to((rt_uintptr_t)&to_thread->sp);

    /* never come back */
}
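
/*
 * Typical start-up sequence (a sketch; in practice rtthread_startup() performs
 * these steps, and the thread-creation code in between is target specific):
 *
 *     rt_system_scheduler_init();     // prepare priority table and ready bitmap
 *     // ... create threads and call rt_thread_startup() on them ...
 *     rt_system_scheduler_start();    // switch to the first thread; never returns
 */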

/**
 * @addtogroup group_thread_management
 * @cond
 */

/**@{*/

/**
 * @brief Perform thread scheduling once. Select the highest priority thread and switch to it.
 *
 * @details This function:
 *   - Disables interrupts to prevent preemption during scheduling
 *   - Checks if the scheduler is enabled (lock_nest == 0)
 *   - Gets the highest priority ready thread
 *   - Determines if the current thread should continue running or be preempted
 *   - Performs a context switch if needed:
 *     * From current thread to new thread (normal case)
 *     * Handles special cases like interrupt context switches
 *   - Manages thread states (READY/RUNNING) and priority queues
 *   - Handles thread yield flags and signal processing
 */
void rt_schedule(void)
{
    rt_base_t level;
    struct rt_thread *to_thread;
    struct rt_thread *from_thread;
    /* use a local variable to avoid an unnecessary function call */
    struct rt_thread *curr_thread = rt_thread_self();

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* check whether the scheduler is enabled */
    if (rt_scheduler_lock_nest == 0)
    {
        rt_ubase_t highest_ready_priority;

        if (rt_thread_ready_priority_group != 0)
        {
            /* need_insert_from_thread: from_thread needs to be inserted back into the ready queue */
            int need_insert_from_thread = 0;

            to_thread = _scheduler_get_highest_priority_thread(&highest_ready_priority);

            if ((RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_MASK) == RT_THREAD_RUNNING)
            {
                if (RT_SCHED_PRIV(curr_thread).current_priority < highest_ready_priority)
                {
                    to_thread = curr_thread;
                }
                else if (RT_SCHED_PRIV(curr_thread).current_priority == highest_ready_priority
                         && (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_YIELD_MASK) == 0)
                {
                    to_thread = curr_thread;
                }
                else
                {
                    need_insert_from_thread = 1;
                }
            }

            if (to_thread != curr_thread)
            {
                /* if the destination thread is not the same as current thread */
                rt_current_priority = (rt_uint8_t)highest_ready_priority;
                from_thread                   = curr_thread;
                rt_cpu_self()->current_thread = to_thread;

                RT_OBJECT_HOOK_CALL(rt_scheduler_hook, (from_thread, to_thread));

                if (need_insert_from_thread)
                {
                    rt_sched_insert_thread(from_thread);
                }

                if ((RT_SCHED_CTX(from_thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
                {
                    RT_SCHED_CTX(from_thread).stat &= ~RT_THREAD_STAT_YIELD_MASK;
                }

                rt_sched_remove_thread(to_thread);
                RT_SCHED_CTX(to_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(to_thread).stat & ~RT_THREAD_STAT_MASK);

                /* switch to new thread */
                LOG_D("[%d]switch to priority#%d "
                         "thread:%.*s(sp:0x%08x), "
                         "from thread:%.*s(sp: 0x%08x)",
                         rt_interrupt_nest, highest_ready_priority,
                         RT_NAME_MAX, to_thread->parent.name, to_thread->sp,
                         RT_NAME_MAX, from_thread->parent.name, from_thread->sp);

                RT_SCHEDULER_STACK_CHECK(to_thread);

                if (rt_interrupt_nest == 0)
                {
                    extern void rt_thread_handle_sig(rt_bool_t clean_state);

                    RT_OBJECT_HOOK_CALL(rt_scheduler_switch_hook, (from_thread));

                    rt_hw_context_switch((rt_uintptr_t)&from_thread->sp,
                            (rt_uintptr_t)&to_thread->sp);

                    /* enable interrupt */
                    rt_hw_interrupt_enable(level);

#ifdef RT_USING_SIGNALS
                    /* check stat of thread for signal */
                    level = rt_hw_interrupt_disable();
                    if (RT_SCHED_CTX(curr_thread).stat & RT_THREAD_STAT_SIGNAL_PENDING)
                    {
                        RT_SCHED_CTX(curr_thread).stat &= ~RT_THREAD_STAT_SIGNAL_PENDING;

                        rt_hw_interrupt_enable(level);

                        /* check signal status */
                        rt_thread_handle_sig(RT_TRUE);
                    }
                    else
                    {
                        rt_hw_interrupt_enable(level);
                    }
#endif /* RT_USING_SIGNALS */
                    goto __exit;
                }
                else
                {
                    LOG_D("switch in interrupt");

                    rt_hw_context_switch_interrupt((rt_uintptr_t)&from_thread->sp,
                            (rt_uintptr_t)&to_thread->sp, from_thread, to_thread);
                }
            }
            else
            {
                rt_sched_remove_thread(curr_thread);
                RT_SCHED_CTX(curr_thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(curr_thread).stat & ~RT_THREAD_STAT_MASK);
            }
        }
    }
    else
    {
        SET_CRITICAL_SWITCH_FLAG();
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

__exit:
    return;
}
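
/*
 * rt_schedule() is usually reached indirectly. A minimal sketch of a thread
 * voluntarily giving up the CPU (rt_thread_yield() marks the current thread
 * with the YIELD flag and then requests a reschedule, which takes the
 * need_insert_from_thread branch above for equal-priority peers):
 *
 *     while (1)
 *     {
 *         do_some_work();      // hypothetical application function
 *         rt_thread_yield();   // let same-priority ready threads run
 *     }
 */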

/**
 * @brief Initialize thread scheduling attributes for startup
 *
 * @param thread The thread to be initialized
 *
 * @details This function:
 *   - For systems with >32 priority levels:
 *     * Sets the thread's priority group number (5 bits)
 *     * Creates the number mask for the priority group
 *     * Creates the high mask for the specific priority (3 bits)
 *   - For systems with <=32 priority levels:
 *     * Creates a simple number mask for the priority
 *   - Sets thread state to SUSPEND to prepare for later activation
 *
 * @note This function must be called before a thread can be scheduled.
 *       It prepares the thread's priority-related data structures.
 *       Normally no one races with us here, so this operation is lockless.
 */
void rt_sched_thread_startup(struct rt_thread *thread)
{
#if RT_THREAD_PRIORITY_MAX > 32
    RT_SCHED_PRIV(thread).number = RT_SCHED_PRIV(thread).current_priority >> 3;            /* 5bit */
    RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).number;
    RT_SCHED_PRIV(thread).high_mask = 1L << (RT_SCHED_PRIV(thread).current_priority & 0x07);  /* 3bit */
#else
    RT_SCHED_PRIV(thread).number_mask = 1L << RT_SCHED_PRIV(thread).current_priority;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */

    /* change thread stat, so we can resume it */
    RT_SCHED_CTX(thread).stat = RT_THREAD_SUSPEND;
}
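
/*
 * Worked example of the decomposition above for current_priority == 37
 * (RT_THREAD_PRIORITY_MAX > 32):
 *
 *     number      = 37 >> 3;         // 4: index into rt_thread_ready_table
 *     number_mask = 1L << 4;         // 0x10: bit in rt_thread_ready_priority_group
 *     high_mask   = 1L << (37 & 7);  // 0x20: bit within rt_thread_ready_table[4]
 */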

/**
 * @brief Initialize thread's scheduling private data
 *
 * @param thread Pointer to the thread control block
 * @param tick Initial time slice value for the thread
 * @param priority Initial priority of the thread
 *
 * @details This function:
 *   - Initializes the thread's list node
 *   - Sets initial and current priority (must be < RT_THREAD_PRIORITY_MAX)
 *   - Initializes priority masks (number_mask, number, high_mask for >32 priorities)
 *   - Sets initial and remaining time slice ticks
 */
void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority)
{
    rt_list_init(&RT_THREAD_LIST_NODE(thread));

    /* priority init */
    RT_ASSERT(priority < RT_THREAD_PRIORITY_MAX);
    RT_SCHED_PRIV(thread).init_priority    = priority;
    RT_SCHED_PRIV(thread).current_priority = priority;

    /* don't add to scheduler queue as init thread */
    RT_SCHED_PRIV(thread).number_mask = 0;
#if RT_THREAD_PRIORITY_MAX > 32
    RT_SCHED_PRIV(thread).number = 0;
    RT_SCHED_PRIV(thread).high_mask = 0;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */

    /* tick init */
    RT_SCHED_PRIV(thread).init_tick = tick;
    RT_SCHED_PRIV(thread).remaining_tick = tick;
}

/**
 * @brief This function will insert a thread into the system ready queue. The state of
 *        the thread will be set to READY and the thread will be removed from the suspend queue.
 *
 * @param thread is the thread to be inserted.
 *
 * @note  Please do not invoke this function in user application.
 */
void rt_sched_insert_thread(struct rt_thread *thread)
{
    rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* it's the current thread: it should stay in RUNNING state */
    if (thread == rt_current_thread)
    {
        RT_SCHED_CTX(thread).stat = RT_THREAD_RUNNING | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
        goto __exit;
    }

    /* READY thread, insert to ready queue */
    RT_SCHED_CTX(thread).stat = RT_THREAD_READY | (RT_SCHED_CTX(thread).stat & ~RT_THREAD_STAT_MASK);
    /* no time slice left (YIELD): insert the thread at the tail of the ready list */
    if ((RT_SCHED_CTX(thread).stat & RT_THREAD_STAT_YIELD_MASK) != 0)
    {
        rt_list_insert_before(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
                              &RT_THREAD_LIST_NODE(thread));
    }
    /* time slices remain: insert the thread at the head of the ready list so it is scheduled first next time */
    else
    {
        rt_list_insert_after(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority]),
                              &RT_THREAD_LIST_NODE(thread));
    }

    LOG_D("insert thread[%.*s], the priority: %d",
          RT_NAME_MAX, thread->parent.name, RT_SCHED_PRIV(rt_current_thread).current_priority);

    /* set priority mask */
#if RT_THREAD_PRIORITY_MAX > 32
    rt_thread_ready_table[RT_SCHED_PRIV(thread).number] |= RT_SCHED_PRIV(thread).high_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
    rt_thread_ready_priority_group |= RT_SCHED_PRIV(thread).number_mask;

__exit:
    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
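
/*
 * Effect of the two insertion paths above on a priority list that already
 * holds threads A and B (the list head is a sentinel, so inserting "before"
 * it appends at the tail):
 *
 *     // thread T yielded (no time slice left):  head -> A -> B -> T   (runs last)
 *     // thread T preempted (slices remaining):  head -> T -> A -> B   (runs first)
 */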

/**
 * @brief This function will remove a thread from the system ready queue.
 *
 * @param thread is the thread to be removed.
 *
 * @note  Please do not invoke this function in user application.
 */
void rt_sched_remove_thread(struct rt_thread *thread)
{
    rt_base_t level;

    RT_ASSERT(thread != RT_NULL);

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    LOG_D("remove thread[%.*s], the priority: %d",
          RT_NAME_MAX, thread->parent.name,
          RT_SCHED_PRIV(rt_current_thread).current_priority);

    /* remove thread from ready list */
    rt_list_remove(&RT_THREAD_LIST_NODE(thread));
    if (rt_list_isempty(&(rt_thread_priority_table[RT_SCHED_PRIV(thread).current_priority])))
    {
#if RT_THREAD_PRIORITY_MAX > 32
        rt_thread_ready_table[RT_SCHED_PRIV(thread).number] &= ~RT_SCHED_PRIV(thread).high_mask;
        if (rt_thread_ready_table[RT_SCHED_PRIV(thread).number] == 0)
        {
            rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
        }
#else
        rt_thread_ready_priority_group &= ~RT_SCHED_PRIV(thread).number_mask;
#endif /* RT_THREAD_PRIORITY_MAX > 32 */
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}

#ifdef RT_DEBUGING_CRITICAL

static volatile int _critical_error_occurred = 0;

/**
 * @brief Safely exit critical section with level checking
 *
 * @param critical_level The expected critical level to match current lock nest
 *
 * @details This function:
 *   - Disables interrupts to prevent preemption during the check
 *   - Verifies the provided critical_level matches the current rt_scheduler_lock_nest
 *   - If a mismatch is detected (debug mode only):
 *     * Sets the error flag
 *     * Prints debug information including a backtrace
 *     * Enters an infinite loop to halt the system
 *   - Always calls rt_exit_critical() to perform the actual exit
 *
 * @note This is a debug version that adds safety checks for critical section exit.
 */
void rt_exit_critical_safe(rt_base_t critical_level)
{
    rt_base_t level;
    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    if (!_critical_error_occurred)
    {
        if (critical_level != rt_scheduler_lock_nest)
        {
            int dummy = 1;
            _critical_error_occurred = 1;

            rt_kprintf("%s: incompatible critical level\n" \
                       "\tCurrent %d\n\tCaller %d\n",
                       __func__, rt_scheduler_lock_nest,
                       critical_level);
            rt_backtrace();

            while (dummy) ;
        }
    }
    rt_hw_interrupt_enable(level);

    rt_exit_critical();
}

#else /* !RT_DEBUGING_CRITICAL */

/**
 * @brief Safely exit critical section (non-debug version)
 *        If rt_schedule() was called while the scheduler was locked, the pending
 *        reschedule is performed inside this function.
 *
 * @param critical_level The expected critical level (unused in non-debug build)
 *
 * @details This is the non-debug version that simply calls rt_exit_critical().
 *          The critical_level parameter is ignored in this implementation.
 */
void rt_exit_critical_safe(rt_base_t critical_level)
{
    rt_exit_critical();
}

#endif /* RT_DEBUGING_CRITICAL */
RTM_EXPORT(rt_exit_critical_safe);

/**
 * @brief Enter critical section and lock the scheduler
 *
 * @return rt_base_t The current critical level (nesting count)
 *
 * @details This function:
 *   - Disables interrupts to prevent preemption
 *   - Increments the scheduler lock nesting count
 *   - Returns the new nesting count as the critical level
 *   - Re-enables interrupts while maintaining the lock
 *
 * @note The nesting count can go up to RT_UINT16_MAX.
 *       Must be paired with rt_exit_critical().
 *       Interrupts are only disabled during the lock operation.
 */
rt_base_t rt_enter_critical(void)
{
    rt_base_t level;
    rt_base_t critical_level;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /*
     * the maximal nesting depth is RT_UINT16_MAX, which is big
     * enough, so it is not checked here
     */
    rt_scheduler_lock_nest ++;
    critical_level = rt_scheduler_lock_nest;

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

    return critical_level;
}
RTM_EXPORT(rt_enter_critical);
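
/*
 * Usage sketch pairing rt_enter_critical() with rt_exit_critical_safe()
 * (hypothetical caller code; with RT_DEBUGING_CRITICAL enabled, the saved
 * critical_level lets the kernel detect unbalanced enter/exit pairs):
 *
 *     rt_base_t critical_level = rt_enter_critical();
 *     // ... preemption is disabled here; interrupts remain enabled ...
 *     rt_exit_critical_safe(critical_level);
 */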

/**
 * @brief Exit critical section and unlock the scheduler
 *        If rt_schedule() was called while the scheduler was locked, the pending
 *        reschedule is performed inside this function.
 *
 * @details This function:
 *   - Decrements the scheduler lock nesting count
 *   - If the nesting count reaches zero:
 *     * Resets the nesting count
 *     * Re-enables interrupts
 *     * Performs the pending reschedule, if any
 *   - If the nesting count is still positive:
 *     * Just re-enables interrupts while maintaining the lock
 *
 * @note Must be paired with rt_enter_critical().
 *       Interrupts are only disabled during the lock operation.
 *       Scheduling only occurs when fully unlocked (nest == 0).
 */
void rt_exit_critical(void)
{
    rt_base_t level;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    rt_scheduler_lock_nest --;
    if (rt_scheduler_lock_nest <= 0)
    {
        rt_scheduler_lock_nest = 0;
        /* enable interrupt */
        rt_hw_interrupt_enable(level);

        if (IS_CRITICAL_SWITCH_PEND())
        {
            CLR_CRITICAL_SWITCH_FLAG();
            /* the scheduler is started and a reschedule is pending, so do a schedule */
            rt_schedule();
        }
    }
    else
    {
        /* enable interrupt */
        rt_hw_interrupt_enable(level);
    }
}
RTM_EXPORT(rt_exit_critical);
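
/*
 * Sketch of the deferred-reschedule behavior implemented above: rt_schedule()
 * called while the scheduler is locked only sets
 * rt_scheduler_critical_switch_flag; the switch happens when the outermost
 * critical section exits:
 *
 *     rt_enter_critical();
 *     rt_schedule();       // scheduler locked: only marks a pending switch
 *     rt_exit_critical();  // nest drops to 0: the pending switch is performed
 */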

/**
 * @brief Get the scheduler lock level.
 *
 * @return the level of the scheduler lock. 0 means unlocked.
 */
rt_uint16_t rt_critical_level(void)
{
    return rt_scheduler_lock_nest;
}
RTM_EXPORT(rt_critical_level);
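
/*
 * Example use of rt_critical_level() as a sanity check (the assertion
 * placement is illustrative application code, not kernel code):
 *
 *     // a blocking call must not be made while the scheduler is locked
 *     RT_ASSERT(rt_critical_level() == 0);
 *     rt_thread_mdelay(100);
 */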

/* CPU binding is only meaningful on SMP; this single-core implementation rejects it */
rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu)
{
    return -RT_EINVAL;
}

/**@}*/
/**@endcond*/