/*
 * Copyright (C) 2015-2017 Alibaba Group Holding Limited
 */

#include "k_api.h"

#if (RHINO_CONFIG_SYS_STATS > 0)
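/* statistics helpers: record how long the scheduler stays disabled */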
static void sched_disable_measure_start(void)
{
    /* start measuring the scheduler-disabled time */
    if (g_sched_lock[cpu_cur_get()] == 0u) {
        g_sched_disable_time_start = HR_COUNT_GET();
    }
}

static void sched_disable_measure_stop(void)
{
    hr_timer_t diff;

    /* stop measuring the scheduler-disabled time; g_sched_lock is always zero here */
    diff = HR_COUNT_GET() - g_sched_disable_time_start;

    if (g_sched_disable_max_time < diff) {
        g_sched_disable_max_time = diff;
    }

    if (g_cur_sched_disable_max_time < diff) {
        g_cur_sched_disable_max_time = diff;
    }
}
#endif

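/*
 * Disable task scheduling on the current CPU. Calls may nest up to
 * SCHED_MAX_LOCK_COUNT and each one must be balanced by krhino_sched_enable().
 */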
kstat_t krhino_sched_disable(void)
{
    CPSR_ALLOC();

    RHINO_CRITICAL_ENTER();

    INTRPT_NESTED_LEVEL_CHK();

    if (g_sched_lock[cpu_cur_get()] >= SCHED_MAX_LOCK_COUNT) {
        RHINO_CRITICAL_EXIT();
        return RHINO_SCHED_LOCK_COUNT_OVF;
    }

#if (RHINO_CONFIG_SYS_STATS > 0)
    sched_disable_measure_start();
#endif

    g_sched_lock[cpu_cur_get()]++;

    RHINO_CRITICAL_EXIT();

    return RHINO_SUCCESS;
}

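/*
 * Re-enable task scheduling on the current CPU. When the lock count drops to
 * zero, the critical section is left via RHINO_CRITICAL_EXIT_SCHED() so that a
 * pending reschedule can take place.
 */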
kstat_t krhino_sched_enable(void)
{
    CPSR_ALLOC();

    RHINO_CRITICAL_ENTER();

    INTRPT_NESTED_LEVEL_CHK();

    if (g_sched_lock[cpu_cur_get()] == 0u) {
        RHINO_CRITICAL_EXIT();
        return RHINO_SCHED_ALREADY_ENABLED;
    }

    g_sched_lock[cpu_cur_get()]--;

    if (g_sched_lock[cpu_cur_get()] > 0u) {
        RHINO_CRITICAL_EXIT();
        return RHINO_SCHED_DISABLE;
    }

#if (RHINO_CONFIG_SYS_STATS > 0)
    sched_disable_measure_stop();
#endif

    RHINO_CRITICAL_EXIT_SCHED();

    return RHINO_SUCCESS;
}

#if (RHINO_CONFIG_CPU_NUM > 1)
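/*
 * Pick the preferred ready task for this CPU and switch to it when it differs
 * from the current task. Entered with g_sys_lock held; every early-return path
 * releases the lock before returning.
 */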
void core_sched(void)
{
    uint8_t cur_cpu_num;
    ktask_t *preferred_task;
#if (RHINO_CONFIG_SCHED_CFS > 0)
    lr_timer_t cur_task_exec_time;
#endif

    cur_cpu_num = cpu_cur_get();

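    /* dis_sched was set by task_sched_to_cpu() to skip one local reschedule;
       clear the flag and return without switching */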
    if (g_per_cpu[cur_cpu_num].dis_sched > 0u) {
        g_per_cpu[cur_cpu_num].dis_sched = 0u;
        krhino_spin_unlock(&g_sys_lock);
        return;
    }

    if (g_intrpt_nested_level[cur_cpu_num] > 0u) {
        krhino_spin_unlock(&g_sys_lock);
        return;
    }

    if (g_sched_lock[cur_cpu_num] > 0u) {
        krhino_spin_unlock(&g_sys_lock);
        return;
    }

    preferred_task = preferred_cpu_ready_task_get(&g_ready_queue, cur_cpu_num);

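    /* CFS path: if the fixed-priority queue only offers the idle task, try to
       pick a CFS task instead; in either case, charge the running CFS task's
       execution time back into the CFS tree before it is preempted */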
#if (RHINO_CONFIG_SCHED_CFS > 0)
    if (preferred_task == &g_idle_task[cur_cpu_num]) {
        if (g_active_task[cur_cpu_num]->sched_policy == KSCHED_CFS) {
            if (g_active_task[cur_cpu_num]->task_state == K_RDY) {
                cur_task_exec_time = g_active_task[cur_cpu_num]->task_time_this_run +
                                     (LR_COUNT_GET() - g_active_task[cur_cpu_num]->task_time_start);
                if (cur_task_exec_time < MIN_TASK_RUN_TIME) {
                    krhino_spin_unlock(&g_sys_lock);
                    return;
                }
                cfs_node_insert(&g_active_task[cur_cpu_num]->node, cur_task_exec_time);
            }
        }
        preferred_task = cfs_preferred_task_get();
        if (preferred_task == 0) {
            preferred_task = &g_idle_task[cur_cpu_num];
        }
    } else {
        if (g_active_task[cur_cpu_num]->sched_policy == KSCHED_CFS) {
            if (g_active_task[cur_cpu_num]->task_state == K_RDY) {
                cur_task_exec_time = g_active_task[cur_cpu_num]->task_time_this_run +
                                     (LR_COUNT_GET() - g_active_task[cur_cpu_num]->task_time_start);
                cfs_node_insert(&g_active_task[cur_cpu_num]->node, cur_task_exec_time);
            }
        }
    }

    if (preferred_task->sched_policy == KSCHED_CFS) {
        cfs_node_del(&preferred_task->node);
    }
#endif

    /* if the preferred task is the current task, no switch is needed; just return */
    if (preferred_task == g_active_task[cur_cpu_num]) {
        krhino_spin_unlock(&g_sys_lock);
        return;
    }

    TRACE_TASK_SWITCH(g_active_task[cur_cpu_num], preferred_task);

#if (RHINO_CONFIG_USER_HOOK > 0)
    krhino_task_switch_hook(g_active_task[cur_cpu_num], preferred_task);
#endif

    g_active_task[cur_cpu_num]->cur_exc = 0;
    preferred_task->cpu_num = cur_cpu_num;
    preferred_task->cur_exc = 1;
    g_preferred_ready_task[cur_cpu_num] = preferred_task;

    cpu_task_switch();
}
#else
void core_sched(void)
{
    uint8_t cur_cpu_num;
    ktask_t *preferred_task;
#if (RHINO_CONFIG_SCHED_CFS > 0)
    lr_timer_t cur_task_exec_time;
#endif
    cur_cpu_num = cpu_cur_get();

    if (g_intrpt_nested_level[cur_cpu_num] > 0u) {
        return;
    }

    if (g_sched_lock[cur_cpu_num] > 0u) {
        return;
    }

    preferred_task = preferred_cpu_ready_task_get(&g_ready_queue, cur_cpu_num);

#if (RHINO_CONFIG_SCHED_CFS > 0)
    if (preferred_task == &g_idle_task[cur_cpu_num]) {
        if (g_active_task[cur_cpu_num]->sched_policy == KSCHED_CFS) {
            if (g_active_task[cur_cpu_num]->task_state == K_RDY) {
                cur_task_exec_time = g_active_task[cur_cpu_num]->task_time_this_run +
                                     (LR_COUNT_GET() - g_active_task[cur_cpu_num]->task_time_start);
                if (cur_task_exec_time < MIN_TASK_RUN_TIME) {
                    return;
                }
                cfs_node_insert(&g_active_task[cur_cpu_num]->node, cur_task_exec_time);
            }
        }
        preferred_task = cfs_preferred_task_get();
        if (preferred_task == 0) {
            preferred_task = &g_idle_task[cur_cpu_num];
        }
    } else {
        if (g_active_task[cur_cpu_num]->sched_policy == KSCHED_CFS) {
            if (g_active_task[cur_cpu_num]->task_state == K_RDY) {
                cur_task_exec_time = g_active_task[cur_cpu_num]->task_time_this_run +
                                     (LR_COUNT_GET() - g_active_task[cur_cpu_num]->task_time_start);
                cfs_node_insert(&g_active_task[cur_cpu_num]->node, cur_task_exec_time);
            }
        }
    }

    if (preferred_task->sched_policy == KSCHED_CFS) {
        cfs_node_del(&preferred_task->node);
    }
#endif

    /* if the preferred task is the current task, no switch is needed; just return */
    if (preferred_task == g_active_task[cur_cpu_num]) {
        return;
    }

    g_preferred_ready_task[cur_cpu_num] = preferred_task;

    TRACE_TASK_SWITCH(g_active_task[cur_cpu_num], g_preferred_ready_task[cur_cpu_num]);

#if (RHINO_CONFIG_USER_HOOK > 0)
    krhino_task_switch_hook(g_active_task[cur_cpu_num], g_preferred_ready_task[cur_cpu_num]);
#endif

    cpu_task_switch();
}
#endif

void runqueue_init(runqueue_t *rq)
{
    uint8_t prio;

    rq->highest_pri = RHINO_CONFIG_PRI_MAX;

    for (prio = 0; prio < RHINO_CONFIG_PRI_MAX; prio++) {
        rq->cur_list_item[prio] = NULL;
    }
}

RHINO_INLINE void ready_list_init(runqueue_t *rq, ktask_t *task)
{
    rq->cur_list_item[task->prio] = &task->task_list;
    klist_init(rq->cur_list_item[task->prio]);
    krhino_bitmap_set(rq->task_bit_map, task->prio);

    if ((task->prio) < (rq->highest_pri)) {
        rq->highest_pri = task->prio;
    }
}

RHINO_INLINE uint8_t is_ready_list_empty(uint8_t prio)
{
    return (g_ready_queue.cur_list_item[prio] == NULL);
}

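/*
 * The per-priority ready list is circular and cur_list_item points at its head.
 * Both helpers below link the task into that list; _ready_list_add_head() then
 * moves cur_list_item to the new node so the task becomes the head.
 */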
RHINO_INLINE void _ready_list_add_tail(runqueue_t *rq, ktask_t *task)
{
    if (is_ready_list_empty(task->prio)) {
        ready_list_init(rq, task);
        return;
    }

    klist_insert(rq->cur_list_item[task->prio], &task->task_list);
}

RHINO_INLINE void _ready_list_add_head(runqueue_t *rq, ktask_t *task)
{
    if (is_ready_list_empty(task->prio)) {
        ready_list_init(rq, task);
        return;
    }

    klist_insert(rq->cur_list_item[task->prio], &task->task_list);
    rq->cur_list_item[task->prio] = &task->task_list;
}

#if (RHINO_CONFIG_CPU_NUM > 1)
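/*
 * Decide whether a newly ready task should run on another CPU and, if so,
 * signal that CPU: a CPU-binded task signals its bound CPU when it would
 * preempt there; an unbinded task is routed to the CPU running the
 * lowest-priority task.
 */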
static void task_sched_to_cpu(runqueue_t *rq, ktask_t *task, uint8_t cur_cpu_num)
{
    size_t i;
    uint8_t low_pri;
    size_t low_pri_cpu_num = 0;

    (void)rq;

    if (g_sys_stat == RHINO_RUNNING) {
        if (task->cpu_binded == 1) {
            if (task->cpu_num != cur_cpu_num) {
                if (task->prio <= g_active_task[task->cpu_num]->prio) {
                    cpu_signal(task->cpu_num);
                }
            }
        } else {
            /* find the CPU running the lowest-priority task */
            low_pri = g_active_task[0]->prio;
            for (i = 0; i < RHINO_CONFIG_CPU_NUM - 1; i++) {
                if (low_pri < g_active_task[i + 1]->prio) {
                    low_pri = g_active_task[i + 1]->prio;
                    low_pri_cpu_num = i + 1;
                }
            }

            if (task->prio <= low_pri) {
                if (low_pri_cpu_num != cur_cpu_num) {
                    if (task->prio < g_active_task[cur_cpu_num]->prio) {
                        g_per_cpu[cur_cpu_num].dis_sched = 1u;
                    }
                    cpu_signal(low_pri_cpu_num);
                }
            }
        }
    }
}

void ready_list_add_head(runqueue_t *rq, ktask_t *task)
{
#if (RHINO_CONFIG_SCHED_CFS > 0)
    if (task->sched_policy == KSCHED_CFS) {
        cfs_node_insert(&task->node, cfs_node_min_get());
    } else {
        _ready_list_add_head(rq, task);
    }
    task_sched_to_cpu(rq, task, cpu_cur_get());
#else
    _ready_list_add_head(rq, task);
    task_sched_to_cpu(rq, task, cpu_cur_get());
#endif
}

void ready_list_add_tail(runqueue_t *rq, ktask_t *task)
{
#if (RHINO_CONFIG_SCHED_CFS > 0)
    if (task->sched_policy == KSCHED_CFS) {
        cfs_node_insert(&task->node, cfs_node_min_get());
    } else {
        _ready_list_add_tail(rq, task);
    }
    task_sched_to_cpu(rq, task, cpu_cur_get());
#else
    _ready_list_add_tail(rq, task);
    task_sched_to_cpu(rq, task, cpu_cur_get());
#endif
}

#else
void ready_list_add_head(runqueue_t *rq, ktask_t *task)
{
#if (RHINO_CONFIG_SCHED_CFS > 0)
    if (task->sched_policy == KSCHED_CFS) {
        cfs_node_insert(&task->node, cfs_node_min_get());
    } else {
        _ready_list_add_head(rq, task);
    }
#else
    _ready_list_add_head(rq, task);
#endif
}

void ready_list_add_tail(runqueue_t *rq, ktask_t *task)
{
#if (RHINO_CONFIG_SCHED_CFS > 0)
    if (task->sched_policy == KSCHED_CFS) {
        cfs_node_insert(&task->node, cfs_node_min_get());
    } else {
        _ready_list_add_tail(rq, task);
    }
#else
    _ready_list_add_tail(rq, task);
#endif
}
#endif

void ready_list_add(runqueue_t *rq, ktask_t *task)
{
    ready_list_add_tail(rq, task);
    TRACE_TASK_START_READY(task);
}

void ready_list_rm(runqueue_t *rq, ktask_t *task)
{
    int32_t i;
    uint8_t pri = task->prio;

    TRACE_TASK_STOP_READY(task);

#if (RHINO_CONFIG_SCHED_CFS > 0)
    if (task->sched_policy == KSCHED_CFS) {
        if (g_active_task[cpu_cur_get()] != task) {
            cfs_node_del(&task->node);
        }
        return;
    }
#endif

    /* if there is more than one task on this ready list, the highest prio does not need updating */
    if ((rq->cur_list_item[pri]) != (rq->cur_list_item[pri]->next)) {
        if (rq->cur_list_item[pri] == &task->task_list) {
            rq->cur_list_item[pri] = rq->cur_list_item[pri]->next;
        }

        klist_rm(&task->task_list);
        return;
    }

    /* only one item; just set the current item pointer to NULL */
    rq->cur_list_item[pri] = NULL;

    krhino_bitmap_clear(rq->task_bit_map, pri);

    /* if the task prio is not the highest prio, the highest prio does not need updating */
    /* this happens when a high prio task suspends a lower priority task */
    if (pri != rq->highest_pri) {
        return;
    }

    /* find the highest ready task */
    i = krhino_bitmap_first(rq->task_bit_map);

    /* update the next highest prio task */
    if (i >= 0) {
        rq->highest_pri = i;
    } else {
        k_err_proc(RHINO_SYS_FATAL_ERR);
    }
}

void ready_list_head_to_tail(runqueue_t *rq, ktask_t *task)
{
    rq->cur_list_item[task->prio] = rq->cur_list_item[task->prio]->next;
}

#if (RHINO_CONFIG_CPU_NUM > 1)
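/*
 * Walk the ready tasks from the highest priority downward and return the first
 * one this CPU may run: either the task already running here, or a task that is
 * not running anywhere and is allowed on this CPU (unbinded, or binded to it).
 */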
ktask_t *preferred_cpu_ready_task_get(runqueue_t *rq, uint8_t cpu_num)
{
    klist_t *iter;
    ktask_t *task;
    uint32_t task_bit_map[NUM_WORDS];
    klist_t *node;
    uint8_t flag;
    uint8_t i;
    uint8_t highest_pri = rq->highest_pri;

    node = rq->cur_list_item[highest_pri];
    iter = node;

    for (i = 0; i < NUM_WORDS; i++) {
        task_bit_map[i] = rq->task_bit_map[i];
    }

    while (1) {

        task = krhino_list_entry(iter, ktask_t, task_list);

        if (g_active_task[cpu_num] == task) {
            return task;
        }

        flag = ((task->cur_exc == 0) && (task->cpu_binded == 0))
               || ((task->cur_exc == 0) && (task->cpu_binded == 1) && (task->cpu_num == cpu_num));

        if (flag > 0) {
            return task;
        }

        if (iter->next == rq->cur_list_item[highest_pri]) {
            krhino_bitmap_clear(task_bit_map, highest_pri);
            highest_pri = krhino_bitmap_first(task_bit_map);
            iter = rq->cur_list_item[highest_pri];
        } else {
            iter = iter->next;
        }
    }
}
#else
ktask_t *preferred_cpu_ready_task_get(runqueue_t *rq, uint8_t cpu_num)
{
    klist_t *node = rq->cur_list_item[rq->highest_pri];
    /* get the highest prio task object */
    return krhino_list_entry(node, ktask_t, task_list);
}
#endif

#if (RHINO_CONFIG_SCHED_RR > 0)
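/*
 * Round-robin accounting for the current task: decrement its remaining time
 * slice and, once the slice is used up, rotate the task to the tail of its
 * priority list and reload the slice.
 */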
void time_slice_update(void)
{
    CPSR_ALLOC();
    ktask_t *task;
    klist_t *head;
    uint8_t task_pri;

    RHINO_CRITICAL_ENTER();
    task = g_active_task[cpu_cur_get()];

#if (RHINO_CONFIG_SCHED_CFS > 0)
    if (task->sched_policy == KSCHED_CFS) {
        RHINO_CRITICAL_EXIT();
        return;
    }
#endif

    task_pri = task->prio;
    head = g_ready_queue.cur_list_item[task_pri];

    /* if the ready list is empty, just return because there is nothing to calculate */
    if (is_ready_list_empty(task_pri)) {
        RHINO_CRITICAL_EXIT();
        return;
    }

    if (task->sched_policy == KSCHED_FIFO) {
        RHINO_CRITICAL_EXIT();
        return;
    }

    /* there is only one task on this ready list, so the time slice need not be calculated */
    /* the idle task always satisfies this condition */
    if (head->next == head) {
        RHINO_CRITICAL_EXIT();
        return;
    }

    if (task->time_slice > 0u) {
        task->time_slice--;
    }

    /* if the current active task still has time slice left, just return */
    if (task->time_slice > 0u) {
        RHINO_CRITICAL_EXIT();
        return;
    }

    /* move the current active task to the end of the ready list for its prio */
    ready_list_head_to_tail(&g_ready_queue, task);

    /* restore the task time slice */
    task->time_slice = task->time_total;

    RHINO_CRITICAL_EXIT();
}
#endif