1 /*
2  * Copyright (c) 2023-2024, RT-Thread Development Team
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Change Logs:
7  * Date           Author       Notes
8  * 2024-01-19     Shell        Separate scheduling statements from rt_thread_t
9  *                             to rt_sched_thread_ctx. Add definitions of scheduler.
10  */
11 #ifndef __RT_SCHED_H__
12 #define __RT_SCHED_H__
13 
14 #include "rttypes.h"
15 #include "rtcompiler.h"
16 
17 #ifdef __cplusplus
18 extern "C" {
19 #endif
20 
/* Forward declaration only; the full definition of struct rt_thread lives
 * elsewhere in the kernel (presumably rtthread.h/rtdef.h — confirm). This
 * header needs only pointers to it. */
struct rt_thread;

/* Scalar type holding a thread's scheduler status value (see the `stat`
 * field of struct rt_sched_thread_ctx and rt_sched_thread_get_stat()). */
typedef rt_uint8_t rt_sched_thread_status_t;
24 
25 /**
26  * Scheduler private status binding on thread. Caller should never accessing
27  * these members.
28  */
29 struct rt_sched_thread_priv
30 {
31     rt_tick_t                   init_tick;              /**< thread's initialized tick */
32     rt_tick_t                   remaining_tick;         /**< remaining tick */
33 
34     /* priority */
35     rt_uint8_t                  current_priority;       /**< current priority */
36     rt_uint8_t                  init_priority;          /**< initialized priority */
37 #if RT_THREAD_PRIORITY_MAX > 32
38     rt_uint8_t                  number;                 /**< priority low number */
39     rt_uint8_t                  high_mask;              /**< priority high mask */
40 #endif /* RT_THREAD_PRIORITY_MAX > 32 */
41     rt_uint32_t                 number_mask;            /**< priority number mask */
42 
43 };
44 
45 /**
46  * Scheduler public status binding on thread. Caller must hold the scheduler
47  * lock before access any one of its member.
48  */
49 struct rt_sched_thread_ctx
50 {
51     rt_list_t                   thread_list_node;       /**< node in thread list */
52 
53     rt_uint8_t                  stat;                   /**< thread status */
54     rt_uint8_t                  sched_flag_locked:1;    /**< calling thread have the scheduler locked */
55     rt_uint8_t                  sched_flag_ttmr_set:1;  /**< thread timer is start */
56 
57 #ifdef ARCH_USING_HW_THREAD_SELF
58     rt_uint8_t                  critical_switch_flag:1; /**< critical switch pending */
59 #endif /* ARCH_USING_HW_THREAD_SELF */
60 
61 #ifdef RT_USING_SMP
62     rt_uint8_t                  bind_cpu;               /**< thread is bind to cpu */
63     rt_uint8_t                  oncpu;                  /**< process on cpu */
64 
65     rt_base_t                   critical_lock_nest;     /**< critical lock count */
66 #endif
67 
68     struct rt_sched_thread_priv sched_thread_priv;      /**< private context of scheduler */
69 };
70 
/* Embeds the scheduler context into struct rt_thread as a member named
 * `sched_thread_ctx`. NOTE: the trailing semicolon is part of the macro. */
#define RT_SCHED_THREAD_CTX struct rt_sched_thread_ctx sched_thread_ctx;

/* Accessors for the scheduler context embedded in a thread. RT_SCHED_PRIV()
 * yields the private part, RT_SCHED_CTX() the public part; both evaluate to
 * lvalues (struct values, not pointers). */
#define RT_SCHED_PRIV(thread) ((thread)->sched_thread_ctx.sched_thread_priv)
#define RT_SCHED_CTX(thread) ((thread)->sched_thread_ctx)

/**
 * Convert a list node in container RT_SCHED_CTX(thread)->thread_list_node
 * to a thread pointer. Done in two hops: node -> enclosing
 * rt_sched_thread_ctx (rt_list_entry), then ctx -> enclosing rt_thread
 * (rt_container_of).
 */
#define RT_THREAD_LIST_NODE_ENTRY(node)                                      \
    rt_container_of(                                                         \
        rt_list_entry((node), struct rt_sched_thread_ctx, thread_list_node), \
        struct rt_thread, sched_thread_ctx)
#define RT_THREAD_LIST_NODE(thread) (RT_SCHED_CTX(thread).thread_list_node)
85 
86 /**
87  * System Scheduler Locking
88  */
89 
90 typedef rt_ubase_t rt_sched_lock_level_t;
91 
92 rt_err_t rt_sched_lock(rt_sched_lock_level_t *plvl);
93 rt_err_t rt_sched_unlock(rt_sched_lock_level_t level);
94 rt_err_t rt_sched_unlock_n_resched(rt_sched_lock_level_t level);
95 
96 rt_bool_t rt_sched_is_locked(void);
97 
98 #ifdef RT_USING_SMP
99 #define RT_SCHED_DEBUG_IS_LOCKED do { RT_ASSERT(rt_sched_is_locked()); } while (0)
100 #define RT_SCHED_DEBUG_IS_UNLOCKED do { RT_ASSERT(!rt_sched_is_locked()); } while (0)
101 
102 #else /* !RT_USING_SMP */
103 
104 #define RT_SCHED_DEBUG_IS_LOCKED
105 #define RT_SCHED_DEBUG_IS_UNLOCKED
106 #endif /* RT_USING_SMP */
107 
108 /**
109  * NOTE: user should NEVER use these APIs directly. See rt_thread_.* or IPC
110  * methods instead.
111  */
112 #if defined(__RT_KERNEL_SOURCE__) || defined(__RT_IPC_SOURCE__)
113 
114 /* thread initialization and startup routine */
115 void rt_sched_thread_init_ctx(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority);
116 void rt_sched_thread_init_priv(struct rt_thread *thread, rt_uint32_t tick, rt_uint8_t priority);
117 void rt_sched_thread_startup(struct rt_thread *thread);
118 
119 /* scheduler related routine */
120 void rt_sched_post_ctx_switch(struct rt_thread *thread);
121 rt_err_t rt_sched_tick_increase(rt_tick_t tick);
122 
123 /* thread status operation */
124 rt_uint8_t rt_sched_thread_get_stat(struct rt_thread *thread);
125 rt_uint8_t rt_sched_thread_get_curr_prio(struct rt_thread *thread);
126 rt_uint8_t rt_sched_thread_get_init_prio(struct rt_thread *thread);
127 rt_err_t rt_sched_thread_yield(struct rt_thread *thread);
128 rt_err_t rt_sched_thread_close(struct rt_thread *thread);
129 rt_err_t rt_sched_thread_ready(struct rt_thread *thread);
130 rt_err_t rt_sched_thread_suspend(struct rt_thread *thread, rt_sched_lock_level_t level);
131 rt_err_t rt_sched_thread_change_priority(struct rt_thread *thread, rt_uint8_t priority);
132 rt_err_t rt_sched_thread_reset_priority(struct rt_thread *thread, rt_uint8_t priority);
133 rt_err_t rt_sched_thread_bind_cpu(struct rt_thread *thread, int cpu);
134 rt_uint8_t rt_sched_thread_is_suspended(struct rt_thread *thread);
135 rt_err_t rt_sched_thread_timer_stop(struct rt_thread *thread);
136 rt_err_t rt_sched_thread_timer_start(struct rt_thread *thread);
137 void rt_sched_insert_thread(struct rt_thread *thread);
138 void rt_sched_remove_thread(struct rt_thread *thread);
139 struct rt_thread *rt_sched_thread_self(void);
140 
141 #endif /* defined(__RT_KERNEL_SOURCE__) || defined(__RT_IPC_SOURCE__) */
142 
143 #ifdef __cplusplus
144 }
145 #endif
146 
147 #endif /* __RT_SCHED_H__ */
148