/*
 * Copyright (C) 2015-2017 Alibaba Group Holding Limited
 */

#include "k_api.h"
6 
/*
 * Install the interrupt-stack overflow guard and, when available,
 * enable hardware protection for the interrupt stack.
 *
 * A magic word is written at the end of the interrupt stack that would
 * be clobbered first on overflow (lowest address for descending stacks,
 * highest otherwise); krhino_intrpt_stack_ovf_check() later verifies
 * the word is still intact.
 */
RHINO_INLINE void rhino_stack_check_init(void)
{
#if (RHINO_CONFIG_INTRPT_STACK_OVF_CHECK > 0)
#if (RHINO_CONFIG_CPU_STACK_DOWN > 0)
    /* Stack grows downwards: guard word sits at the bottom. */
    g_intrpt_stack_bottom  = (cpu_stack_t *)RHINO_CONFIG_INTRPT_STACK_TOP;
    *g_intrpt_stack_bottom = RHINO_INTRPT_STACK_OVF_MAGIC;
#else
    /* Stack grows upwards: guard word sits at the top. */
    g_intrpt_stack_top  = (cpu_stack_t *)RHINO_CONFIG_INTRPT_STACK_TOP;
    *g_intrpt_stack_top = RHINO_INTRPT_STACK_OVF_MAGIC;
#endif
#endif /* RHINO_CONFIG_INTRPT_STACK_OVF_CHECK */

#if (RHINO_CONFIG_STACK_OVF_CHECK_HW != 0)
    /* Port-specific hardware stack protection (e.g. MPU) — TODO confirm
     * mechanism against the port layer. */
    cpu_intrpt_stack_protect();
#endif
}
23 
/*
 * Initialize the kernel: reset global state, set up the ready queue,
 * tick list and the optional subsystems selected by k_config, and
 * create the idle task(s).  Must be called once, before krhino_start().
 *
 * Returns RHINO_SUCCESS on success.  When dynamic object allocation is
 * enabled, propagates the error from creating the resource-reclaim
 * semaphore.
 */
kstat_t krhino_init(void)
{
#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
    kstat_t ret;
#endif
    g_sys_stat = RHINO_STOPPED;
    g_task_id  = 0;

#if (RHINO_CONFIG_CPU_NUM > 1)
    /* SMP only: global kernel spin lock and the list of tasks pending
     * deletion. */
    krhino_spin_lock_init(&g_sys_lock);
    klist_init(&g_task_del_head);
#endif

    runqueue_init(&g_ready_queue);

    tick_list_init();

#if (RHINO_CONFIG_KOBJ_LIST > 0)
    kobj_list_init();
#endif

#if (RHINO_CONFIG_USER_HOOK > 0)
    krhino_init_hook();
#endif

#if (RHINO_CONFIG_MM_TLF > 0)
    /* Heap must be up before any dynamic kernel object is created. */
    k_mm_init();
#endif

#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
    /* Resource-reclaim machinery: list of freed dynamic objects plus
     * the semaphore the reclaim task blocks on. */
    klist_init(&g_res_list);
    ret = krhino_sem_create(&g_res_sem, "res_sem", 0);
    if (ret != RHINO_SUCCESS)
    {
        return ret;
    }
    dyn_mem_proc_task_start();
#endif

#if (RHINO_CONFIG_CPU_NUM > 1)
    /* One idle task per CPU, pinned to its CPU.
     * NOTE(review): the create return value is not checked here —
     * presumably guaranteed to succeed at init time; verify. */
    for (uint8_t i = 0; i < RHINO_CONFIG_CPU_NUM; i++) {
        krhino_task_cpu_create(&g_idle_task[i], "idle_task", NULL, RHINO_IDLE_PRI, 0,
                               &g_idle_task_stack[i][0], RHINO_CONFIG_IDLE_TASK_STACK_SIZE,
                               idle_task, i, 1u);
    }
#else
    krhino_task_create(&g_idle_task[0], "idle_task", NULL, RHINO_IDLE_PRI, 0,
                       &g_idle_task_stack[0][0], RHINO_CONFIG_IDLE_TASK_STACK_SIZE,
                       idle_task, 1u);
#endif

#if (RHINO_CONFIG_WORKQUEUE > 0)
    workqueue_init();
#endif

#if (RHINO_CONFIG_TIMER > 0)
    ktimer_init();
#endif

#if (RHINO_CONFIG_CPU_USAGE_STATS > 0)
    cpu_usage_stats_start();
#endif

    /* Plant the interrupt-stack guard last, once all stacks are laid out. */
    rhino_stack_check_init();

    return RHINO_SUCCESS;
}
91 
/*
 * Start multitasking.  For each CPU the preferred ready task is pulled
 * from the ready queue and installed as the active task, then
 * cpu_first_task_start() transfers control to it.  On success this
 * function never returns to the caller.
 *
 * Returns RHINO_RUNNING if the kernel was already started, or
 * RHINO_SYS_FATAL_ERR if cpu_first_task_start() unexpectedly returns.
 */
kstat_t krhino_start(void)
{
    ktask_t *preferred_task;

    if (g_sys_stat == RHINO_STOPPED) {
#if (RHINO_CONFIG_CPU_NUM > 1)
        for (uint8_t i = 0; i < RHINO_CONFIG_CPU_NUM; i++) {
            preferred_task            = preferred_cpu_ready_task_get(&g_ready_queue, i);
            /* cur_exc presumably flags "currently executing on a CPU";
             * both stores below hit the same task object. */
            preferred_task->cpu_num   = i;
            preferred_task->cur_exc   = 1;
            g_preferred_ready_task[i] = preferred_task;
            g_active_task[i]          = g_preferred_ready_task[i];
            g_active_task[i]->cur_exc = 1;
        }
#else
        preferred_task = preferred_cpu_ready_task_get(&g_ready_queue, 0);
        g_preferred_ready_task[0] = preferred_task;
        g_active_task[0] = preferred_task;
#endif

#if (RHINO_CONFIG_USER_HOOK > 0)
        krhino_start_hook();
#endif

        /* Mark running before the first context switch so ISRs and the
         * first task see a started kernel. */
        g_sys_stat = RHINO_RUNNING;
        cpu_first_task_start();

        /* should not be here */
        return RHINO_SYS_FATAL_ERR;
    }

    return RHINO_RUNNING;
}
125 
#if (RHINO_CONFIG_INTRPT_STACK_OVF_CHECK > 0)
#if (RHINO_CONFIG_CPU_STACK_DOWN > 0)
/*
 * Verify the guard word planted by rhino_stack_check_init() at the
 * overflow end of the interrupt stack (the bottom, for descending
 * stacks).  A corrupted guard means the interrupt stack overflowed;
 * the fatal error RHINO_INTRPT_STACK_OVF is raised via k_err_proc().
 */
void krhino_intrpt_stack_ovf_check(void)
{
    if (*g_intrpt_stack_bottom != RHINO_INTRPT_STACK_OVF_MAGIC) {
        k_err_proc(RHINO_INTRPT_STACK_OVF);
    }
}
#else
/*
 * Same check for ascending stacks: the guard word sits at the top of
 * the interrupt stack.
 */
void krhino_intrpt_stack_ovf_check(void)
{
    if (*g_intrpt_stack_top != RHINO_INTRPT_STACK_OVF_MAGIC) {
        k_err_proc(RHINO_INTRPT_STACK_OVF);
    }
}
#endif
#endif /* RHINO_CONFIG_INTRPT_STACK_OVF_CHECK */
143 
/*
 * To be called at the entry of every ISR.  Records one more level of
 * interrupt nesting on the current CPU so the kernel knows it is in
 * interrupt context, checks the interrupt-stack guard word, and (when
 * power management is enabled) signals the CPU is active.
 *
 * Always returns RHINO_SUCCESS.
 */
kstat_t krhino_intrpt_enter(void)
{
    CPSR_ALLOC();

    /* Trace hook; the ENTETR spelling is the upstream macro name. */
    TRACE_INTRPT_ENTETR();

#if (RHINO_CONFIG_INTRPT_STACK_OVF_CHECK > 0)
    krhino_intrpt_stack_ovf_check();
#endif

    /* The nesting counter is shared with task context: update it with
     * interrupts masked. */
    RHINO_CPU_INTRPT_DISABLE();
    g_intrpt_nested_level[cpu_cur_get()]++;
    RHINO_CPU_INTRPT_ENABLE();

#if (RHINO_CONFIG_PWRMGMT > 0)
    cpu_pwr_up();
#endif

    return RHINO_SUCCESS;
}
164 
/*
 * To be called at the exit of every ISR.  Drops one level of interrupt
 * nesting and, when this was the outermost interrupt and scheduling is
 * permitted, selects the preferred ready task and performs an
 * interrupt-level context switch if it differs from the current task.
 */
void krhino_intrpt_exit(void)
{
    CPSR_ALLOC();
    uint8_t    cur_cpu_num;
    ktask_t   *preferred_task;
#if (RHINO_CONFIG_SCHED_CFS > 0)
    lr_timer_t cur_task_exec_time;
#endif

    TRACE_INTRPT_EXIT();

#if (RHINO_CONFIG_INTRPT_STACK_OVF_CHECK > 0)
    krhino_intrpt_stack_ovf_check();
#endif

    RHINO_CPU_INTRPT_DISABLE();

    cur_cpu_num = cpu_cur_get();

    g_intrpt_nested_level[cur_cpu_num]--;

    /* Still inside a nested interrupt: no rescheduling yet. */
    if (g_intrpt_nested_level[cur_cpu_num] > 0u) {
        RHINO_CPU_INTRPT_ENABLE();
        return;
    }

    /* One-shot "skip the next reschedule" flag for this CPU. */
    if (g_per_cpu[cur_cpu_num].dis_sched > 0u) {
        g_per_cpu[cur_cpu_num].dis_sched = 0u;
        RHINO_CPU_INTRPT_ENABLE();
        return;
    }

    /* Scheduler is locked by the interrupted task. */
    if (g_sched_lock[cur_cpu_num] > 0u) {
        RHINO_CPU_INTRPT_ENABLE();
        return;
    }

    preferred_task = preferred_cpu_ready_task_get(&g_ready_queue, cur_cpu_num);

#if (RHINO_CONFIG_SCHED_CFS > 0)
    if (preferred_task == &g_idle_task[cur_cpu_num]) {
        /* Only idle is ready in the realtime run queue: fall back to
         * the CFS tree.  Re-queue the current CFS task first, unless it
         * has not yet consumed its minimum run time. */
        if (g_active_task[cur_cpu_num]->sched_policy == KSCHED_CFS) {
            if (g_active_task[cur_cpu_num]->task_state == K_RDY) {
                cur_task_exec_time = g_active_task[cur_cpu_num]->task_time_this_run +
                                 (LR_COUNT_GET() - g_active_task[cur_cpu_num]->task_time_start);
                if (cur_task_exec_time < MIN_TASK_RUN_TIME) {
                    RHINO_CPU_INTRPT_ENABLE();
                    return;
                }
                cfs_node_insert(&g_active_task[cur_cpu_num]->node, cur_task_exec_time);
             }
        }
        preferred_task = cfs_preferred_task_get();
        if (preferred_task == 0) {
            preferred_task = &g_idle_task[cur_cpu_num];
        }
    } else {
        /* A realtime task will run: put the current CFS task back into
         * the CFS tree with its accumulated execution time. */
        if (g_active_task[cur_cpu_num]->sched_policy == KSCHED_CFS) {
            if (g_active_task[cur_cpu_num]->task_state == K_RDY) {
                cur_task_exec_time = g_active_task[cur_cpu_num]->task_time_this_run +
                                 (LR_COUNT_GET() - g_active_task[cur_cpu_num]->task_time_start);
                cfs_node_insert(&g_active_task[cur_cpu_num]->node, cur_task_exec_time);
            }
        }
    }

    /* The task chosen from the CFS tree leaves the tree while running. */
    if (preferred_task->sched_policy == KSCHED_CFS) {
        cfs_node_del(&preferred_task->node);
    }
#endif

    /* Preferred task is already running: nothing to switch. */
    if (preferred_task == g_active_task[cur_cpu_num]) {
        RHINO_CPU_INTRPT_ENABLE();
        return;
    }

    TRACE_INTRPT_TASK_SWITCH(g_active_task[cur_cpu_num], preferred_task);

#if (RHINO_SCHED_NONE_PREEMPT > 0)
    /* Non-preemptive build: only ever switch away from the idle task. */
    if (g_active_task[cur_cpu_num] == &g_idle_task[cur_cpu_num]) {
#endif
#if (RHINO_CONFIG_CPU_NUM > 1)
        g_active_task[cur_cpu_num]->cur_exc = 0;
        preferred_task->cpu_num             = cur_cpu_num;
        preferred_task->cur_exc             = 1;
#endif
        g_preferred_ready_task[cur_cpu_num] = preferred_task;
        cpu_intrpt_switch();
#if (RHINO_SCHED_NONE_PREEMPT > 0)
    }
#endif
    RHINO_CPU_INTRPT_ENABLE();
}
258 
krhino_next_sleep_ticks_get(void)259 tick_t krhino_next_sleep_ticks_get(void)
260 {
261     CPSR_ALLOC();
262     klist_t *tick_head;
263     ktask_t *tcb;
264     klist_t *iter;
265     tick_t   ticks;
266 
267     tick_head = &g_tick_head;
268 
269     RHINO_CRITICAL_ENTER();
270     if (tick_head->next == &g_tick_head) {
271         RHINO_CRITICAL_EXIT();
272         return RHINO_WAIT_FOREVER;
273     }
274 
275     iter  = tick_head->next;
276     tcb   = krhino_list_entry(iter, ktask_t, tick_list);
277     ticks = tcb->tick_match - g_tick_count;
278     RHINO_CRITICAL_EXIT();
279 
280     return ticks;
281 }
282 
283 
krhino_global_space_get(void)284 size_t krhino_global_space_get(void)
285 {
286     size_t mem;
287 
288     mem = sizeof(g_sys_stat) + sizeof(g_idle_task_spawned) + sizeof(g_ready_queue)
289           + sizeof(g_sched_lock) + sizeof(g_intrpt_nested_level) + sizeof(g_preferred_ready_task)
290           + sizeof(g_active_task) + sizeof(g_idle_task) + sizeof(g_idle_task_stack)
291           + sizeof(g_tick_head) + sizeof(g_tick_count) + sizeof(g_idle_count);
292 
293 #if (RHINO_CONFIG_TIMER > 0)
294     mem += sizeof(g_timer_head) + sizeof(g_timer_count)
295            + sizeof(g_timer_task) + sizeof(g_timer_task_stack)
296            + sizeof(g_timer_queue) + sizeof(timer_queue_cb);
297 #endif
298 
299 #if (RHINO_CONFIG_KOBJ_LIST > 0)
300     mem += sizeof(g_kobj_list);
301 #endif
302 
303     return mem;
304 }
305 
/*
 * Return the Rhino kernel version number (the RHINO_VERSION constant).
 */
uint32_t krhino_version_get(void)
{
    return RHINO_VERSION;
}
310 
311