1 /*
2 * Copyright (C) 2015-2017 Alibaba Group Holding Limited
3 */
4
5 #include "k_api.h"
6
7 #if (RHINO_CONFIG_SCHED_CFS > 0)
/* Re-derive a task's scheduling policy from its new priority: a priority
 * inside the real-time band promotes a CFS task to FIFO, while a priority
 * outside that band always demotes the task to CFS. */
static void task_policy_change(ktask_t *task, uint8_t new_pri)
{
    uint8_t in_rt_band = (new_pri >= RT_MIN_PRI) && (new_pri <= RT_MAX_PRI);

    if (!in_rt_band) {
        task->sched_policy = KSCHED_CFS;
        return;
    }

    if (task->sched_policy == KSCHED_CFS) {
        task->sched_policy = KSCHED_FIFO;
    }
}
18
task_policy_check(uint8_t prio,uint8_t policy)19 static kstat_t task_policy_check(uint8_t prio, uint8_t policy)
20 {
21 kstat_t err;
22
23 err = RHINO_SUCCESS;
24 switch (policy) {
25 case KSCHED_FIFO:
26 case KSCHED_RR:
27 if (prio > RT_MAX_PRI) {
28 if (prio != RHINO_IDLE_PRI) {
29 err = RHINO_INV_PARAM;
30 }
31 }
32 break;
33 case KSCHED_CFS:
34 if (prio <= RT_MAX_PRI) {
35 err = RHINO_INV_PARAM;
36 }
37 break;
38 default:
39 k_err_proc(RHINO_INV_TASK_STATE);
40 err = RHINO_INV_TASK_STATE;
41 }
42
43 return err;
44 }
45 #endif
46
task_create(ktask_t * task,const name_t * name,void * arg,uint8_t prio,tick_t ticks,cpu_stack_t * stack_buf,size_t stack_size,task_entry_t entry,uint8_t autorun,uint8_t mm_alloc_flag,uint8_t cpu_num,uint8_t cpu_binded,uint8_t sched_policy)47 static kstat_t task_create(ktask_t *task, const name_t *name, void *arg,
48 uint8_t prio, tick_t ticks, cpu_stack_t *stack_buf,
49 size_t stack_size, task_entry_t entry, uint8_t autorun,
50 uint8_t mm_alloc_flag, uint8_t cpu_num, uint8_t cpu_binded,
51 uint8_t sched_policy)
52 {
53 CPSR_ALLOC();
54 cpu_stack_t *tmp;
55 uint8_t i = 0;
56
57 (void)cpu_binded;
58 (void)i;
59
60 NULL_PARA_CHK(task);
61 NULL_PARA_CHK(name);
62 NULL_PARA_CHK(entry);
63 NULL_PARA_CHK(stack_buf);
64
65 if (stack_size == 0u) {
66 return RHINO_TASK_INV_STACK_SIZE;
67 }
68
69 if (prio >= RHINO_CONFIG_PRI_MAX) {
70 return RHINO_BEYOND_MAX_PRI;
71 }
72
73 #if (RHINO_CONFIG_SCHED_CFS > 0)
74 if (task_policy_check(prio, sched_policy) != RHINO_SUCCESS) {
75 return task_policy_check(prio, sched_policy);
76 }
77 #endif
78
79 RHINO_CRITICAL_ENTER();
80
81 INTRPT_NESTED_LEVEL_CHK();
82
83 /* idle task is only allowed to create once */
84 if (prio == RHINO_IDLE_PRI) {
85 if (g_idle_task_spawned[cpu_num] > 0u) {
86 RHINO_CRITICAL_EXIT();
87 return RHINO_IDLE_TASK_EXIST;
88 }
89
90 g_idle_task_spawned[cpu_num] = 1u;
91 }
92
93 RHINO_CRITICAL_EXIT();
94
95 memset(task, 0, sizeof(ktask_t));
96
97 #if (RHINO_CONFIG_SCHED_RR > 0)
98 if (ticks > 0u) {
99 task->time_total = ticks;
100 } else {
101 task->time_total = RHINO_CONFIG_TIME_SLICE_DEFAULT;
102 }
103
104 task->time_slice = task->time_total;
105 #endif
106
107 task->sched_policy = sched_policy;
108
109 if (autorun > 0u) {
110 task->task_state = K_RDY;
111 } else {
112 task->task_state = K_SUSPENDED;
113 task->suspend_count = 1u;
114 }
115
116 /* init all the stack element to 0 */
117 task->task_stack_base = stack_buf;
118 tmp = stack_buf;
119
120 memset(tmp, 0, stack_size * sizeof(cpu_stack_t));
121
122 klist_init(&task->tick_list);
123 task->task_name = name;
124 task->prio = prio;
125 task->b_prio = prio;
126 task->stack_size = stack_size;
127 task->mm_alloc_flag = mm_alloc_flag;
128 task->cpu_num = cpu_num;
129 task->task_id = ++g_task_id;
130 #if (RHINO_CONFIG_MM_DEBUG > 0)
131 task->task_alloc_size = 0;
132 #endif
133 #if (RHINO_CONFIG_USER_SPACE > 0)
134 task->mode = 0;
135 task->pid = 0;
136 task->task_ustack_base = 0;
137 task->task_group = 0;
138 #endif
139
140 #if (RHINO_CONFIG_CPU_NUM > 1)
141 task->cpu_binded = cpu_binded;
142 #endif
143
144 #if (RHINO_CONFIG_TASK_STACK_OVF_CHECK > 0)
145 #if (RHINO_CONFIG_CPU_STACK_DOWN > 0)
146 tmp = task->task_stack_base;
147 for (i = 0; i < RHINO_CONFIG_STK_CHK_WORDS; i++) {
148 *tmp++ = RHINO_TASK_STACK_OVF_MAGIC;
149 }
150 #else
151 tmp = (cpu_stack_t *)(task->task_stack_base) + task->stack_size - RHINO_CONFIG_STK_CHK_WORDS;
152 for (i = 0; i < RHINO_CONFIG_STK_CHK_WORDS; i++) {
153 *tmp++ = RHINO_TASK_STACK_OVF_MAGIC;
154 }
155 #endif
156 #endif
157
158 task->task_stack = cpu_task_stack_init(stack_buf, stack_size, arg, entry);
159
160 #if (RHINO_CONFIG_USER_HOOK > 0)
161 krhino_task_create_hook(task);
162 #endif
163
164 TRACE_TASK_CREATE(task);
165
166 RHINO_CRITICAL_ENTER();
167
168 #if (RHINO_CONFIG_KOBJ_LIST > 0)
169 klist_insert(&(g_kobj_list.task_head), &task->task_stats_item);
170 #endif
171
172 if (autorun > 0u) {
173 ready_list_add_tail(&g_ready_queue, task);
174 /* if system is not start,not call core_sched */
175 if (g_sys_stat == RHINO_RUNNING) {
176 RHINO_CRITICAL_EXIT_SCHED();
177 return RHINO_SUCCESS;
178 }
179 }
180
181 RHINO_CRITICAL_EXIT();
182 return RHINO_SUCCESS;
183 }
184
krhino_task_create(ktask_t * task,const name_t * name,void * arg,uint8_t prio,tick_t ticks,cpu_stack_t * stack_buf,size_t stack_size,task_entry_t entry,uint8_t autorun)185 kstat_t krhino_task_create(ktask_t *task, const name_t *name, void *arg,
186 uint8_t prio, tick_t ticks, cpu_stack_t *stack_buf,
187 size_t stack_size, task_entry_t entry, uint8_t autorun)
188 {
189 return task_create(task, name, arg, prio, ticks, stack_buf, stack_size, entry,
190 autorun, K_OBJ_STATIC_ALLOC, 0, 0, KSCHED_RR);
191 }
192
193 #if (RHINO_CONFIG_SCHED_CFS > 0)
krhino_cfs_task_create(ktask_t * task,const name_t * name,void * arg,uint8_t prio,cpu_stack_t * stack_buf,size_t stack_size,task_entry_t entry,uint8_t autorun)194 kstat_t krhino_cfs_task_create(ktask_t *task, const name_t *name, void *arg,
195 uint8_t prio, cpu_stack_t *stack_buf, size_t stack_size,
196 task_entry_t entry, uint8_t autorun)
197 {
198 return task_create(task, name, arg, prio, 0, stack_buf, stack_size, entry,
199 autorun, K_OBJ_STATIC_ALLOC, 0, 0, KSCHED_CFS);
200 }
201 #endif
202
203 #if (RHINO_CONFIG_CPU_NUM > 1)
krhino_task_cpu_create(ktask_t * task,const name_t * name,void * arg,uint8_t prio,tick_t ticks,cpu_stack_t * stack_buf,size_t stack_size,task_entry_t entry,uint8_t cpu_num,uint8_t autorun)204 kstat_t krhino_task_cpu_create(ktask_t *task, const name_t *name, void *arg,
205 uint8_t prio, tick_t ticks, cpu_stack_t *stack_buf,
206 size_t stack_size, task_entry_t entry, uint8_t cpu_num,
207 uint8_t autorun)
208 {
209 return task_create(task, name, arg, prio, ticks, stack_buf, stack_size, entry,
210 autorun, K_OBJ_STATIC_ALLOC, cpu_num, 1, KSCHED_RR);
211 }
212
213 #if (RHINO_CONFIG_SCHED_CFS > 0)
krhino_cfs_task_cpu_create(ktask_t * task,const name_t * name,void * arg,uint8_t prio,cpu_stack_t * stack_buf,size_t stack_size,task_entry_t entry,uint8_t cpu_num,uint8_t autorun)214 kstat_t krhino_cfs_task_cpu_create(ktask_t *task, const name_t *name, void *arg,
215 uint8_t prio, cpu_stack_t *stack_buf, size_t stack_size,
216 task_entry_t entry, uint8_t cpu_num, uint8_t autorun)
217 {
218 return task_create(task, name, arg, prio, 0, stack_buf, stack_size, entry,
219 autorun, K_OBJ_STATIC_ALLOC, cpu_num, 1, KSCHED_CFS);
220 }
221 #endif
222
krhino_task_cpu_bind(ktask_t * task,uint8_t cpu_num)223 kstat_t krhino_task_cpu_bind(ktask_t *task, uint8_t cpu_num)
224 {
225 CPSR_ALLOC();
226 ktask_t *task_cur;
227
228 RHINO_CRITICAL_ENTER();
229 task_cur = g_active_task[cpu_cur_get()];
230 if (task != task_cur) {
231 RHINO_CRITICAL_EXIT();
232 return RHINO_INV_PARAM;
233 }
234 task->cpu_num = cpu_num;
235 task->cpu_binded = 1u;
236 RHINO_CRITICAL_EXIT_SCHED();
237
238 return RHINO_SUCCESS;
239 }
240
krhino_task_cpu_unbind(ktask_t * task)241 kstat_t krhino_task_cpu_unbind(ktask_t *task)
242 {
243 CPSR_ALLOC();
244 ktask_t *task_cur;
245
246 RHINO_CRITICAL_ENTER();
247 task_cur = g_active_task[cpu_cur_get()];
248 if (task != task_cur) {
249 RHINO_CRITICAL_EXIT();
250 return RHINO_INV_PARAM;
251 }
252 task->cpu_binded = 0u;
253 RHINO_CRITICAL_EXIT_SCHED();
254
255 return RHINO_SUCCESS;
256 }
257 #endif
258
259 #if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
task_dyn_create(ktask_t ** task,const name_t * name,void * arg,uint8_t pri,tick_t ticks,size_t stack,task_entry_t entry,uint8_t cpu_num,uint8_t cpu_binded,uint8_t autorun,uint8_t sched_policy)260 static kstat_t task_dyn_create(ktask_t **task, const name_t *name, void *arg,
261 uint8_t pri, tick_t ticks, size_t stack, task_entry_t entry,
262 uint8_t cpu_num, uint8_t cpu_binded, uint8_t autorun, uint8_t sched_policy)
263 {
264 kstat_t ret;
265 cpu_stack_t *task_stack;
266 ktask_t *task_obj;
267
268 NULL_PARA_CHK(task);
269
270 if (stack == 0) {
271 return RHINO_INV_PARAM;
272 }
273
274 task_stack = krhino_mm_alloc(stack * sizeof(cpu_stack_t));
275 if (task_stack == NULL) {
276 return RHINO_NO_MEM;
277 }
278
279 task_obj = krhino_mm_alloc(sizeof(ktask_t));
280 if (task_obj == NULL) {
281 krhino_mm_free(task_stack);
282 return RHINO_NO_MEM;
283 }
284
285 *task = task_obj;
286
287 ret = task_create(task_obj, name, arg, pri, ticks, task_stack, stack, entry,
288 autorun, K_OBJ_DYN_ALLOC, cpu_num, cpu_binded, sched_policy);
289 if ((ret != RHINO_SUCCESS) && (ret != RHINO_STOPPED)) {
290 krhino_mm_free(task_stack);
291 krhino_mm_free(task_obj);
292 *task = NULL;
293 return ret;
294 }
295
296 return ret;
297 }
298
krhino_task_dyn_create(ktask_t ** task,const name_t * name,void * arg,uint8_t pri,tick_t ticks,size_t stack,task_entry_t entry,uint8_t autorun)299 kstat_t krhino_task_dyn_create(ktask_t **task, const name_t *name, void *arg,
300 uint8_t pri, tick_t ticks, size_t stack,
301 task_entry_t entry, uint8_t autorun)
302 {
303 return task_dyn_create(task, name, arg, pri, ticks, stack, entry, 0, 0, autorun, KSCHED_RR);
304 }
305
306 #if (RHINO_CONFIG_SCHED_CFS > 0)
krhino_cfs_task_dyn_create(ktask_t ** task,const name_t * name,void * arg,uint8_t pri,size_t stack,task_entry_t entry,uint8_t autorun)307 kstat_t krhino_cfs_task_dyn_create(ktask_t **task, const name_t *name, void *arg,
308 uint8_t pri, size_t stack, task_entry_t entry,
309 uint8_t autorun)
310 {
311 return task_dyn_create(task, name, arg, pri, 0, stack, entry, 0, 0, autorun, KSCHED_CFS);
312 }
313 #endif
314
315 #if (RHINO_CONFIG_CPU_NUM > 1)
krhino_task_cpu_dyn_create(ktask_t ** task,const name_t * name,void * arg,uint8_t pri,tick_t ticks,size_t stack,task_entry_t entry,uint8_t cpu_num,uint8_t autorun)316 kstat_t krhino_task_cpu_dyn_create(ktask_t **task, const name_t *name, void *arg,
317 uint8_t pri, tick_t ticks, size_t stack,
318 task_entry_t entry, uint8_t cpu_num, uint8_t autorun)
319 {
320 return task_dyn_create(task, name, arg, pri, ticks, stack, entry, cpu_num, 1, autorun, KSCHED_RR);
321 }
322
323 #if (RHINO_CONFIG_SCHED_CFS > 0)
krhino_cfs_task_cpu_dyn_create(ktask_t ** task,const name_t * name,void * arg,uint8_t pri,size_t stack,task_entry_t entry,uint8_t cpu_num,uint8_t autorun)324 kstat_t krhino_cfs_task_cpu_dyn_create(ktask_t **task, const name_t *name, void *arg,
325 uint8_t pri, size_t stack, task_entry_t entry,
326 uint8_t cpu_num, uint8_t autorun)
327 {
328 return task_dyn_create(task, name, arg, pri, 0, stack, entry, cpu_num, 1, autorun, KSCHED_CFS);
329 }
330 #endif
331
332 #endif
333 #endif
334
/**
 * Put the calling task to sleep for a number of ticks.
 *
 * @param ticks  sleep duration in ticks; must be non-zero
 *
 * @return RHINO_SUCCESS when the sleep expired normally, RHINO_INV_PARAM
 *         for ticks == 0, RHINO_SCHED_DISABLE when the scheduler is
 *         locked, or the block-state result from pend_state_end_proc()
 *         (e.g. when the sleep was aborted).
 */
kstat_t krhino_task_sleep(tick_t ticks)
{
    CPSR_ALLOC();
    uint8_t cur_cpu_num;
    kstat_t ret;

    if (ticks == 0u) {
        return RHINO_INV_PARAM;
    }

    RHINO_CRITICAL_ENTER();

    INTRPT_NESTED_LEVEL_CHK();

    cur_cpu_num = cpu_cur_get();

    /* system is locked so task can not be blocked just return immediately */
    if (g_sched_lock[cur_cpu_num] > 0u) {
        RHINO_CRITICAL_EXIT();
        return RHINO_SCHED_DISABLE;
    }

    /* mark asleep, arm the tick-list timeout, then leave the ready queue;
       all three steps stay inside one critical section */
    g_active_task[cur_cpu_num]->task_state = K_SLEEP;
    tick_list_insert(g_active_task[cur_cpu_num], ticks);
    ready_list_rm(&g_ready_queue, g_active_task[cur_cpu_num]);

    TRACE_TASK_SLEEP(g_active_task[cur_cpu_num], ticks);

    /* switches away; execution resumes here after wakeup */
    RHINO_CRITICAL_EXIT_SCHED();

    RHINO_CPU_INTRPT_DISABLE();

    /* is task timeout normally after sleep */
    ret = pend_state_end_proc(g_active_task[cpu_cur_get()], NULL);

    RHINO_CPU_INTRPT_ENABLE();

    return ret;
}
374
/**
 * Voluntarily yield the CPU: move the calling task from the head to the
 * tail of its ready list and trigger a reschedule.
 *
 * @return always RHINO_SUCCESS
 */
kstat_t krhino_task_yield(void)
{
    CPSR_ALLOC();

    /* make current task to the end of ready list */
    RHINO_CRITICAL_ENTER();
    ready_list_head_to_tail(&g_ready_queue, g_active_task[cpu_cur_get()]);
    RHINO_CRITICAL_EXIT_SCHED();

    return RHINO_SUCCESS;
}
386
krhino_cur_task_get(void)387 ktask_t *krhino_cur_task_get(void)
388 {
389 CPSR_ALLOC();
390 ktask_t *task;
391
392 RHINO_CRITICAL_ENTER();
393 task = g_active_task[cpu_cur_get()];
394 RHINO_CRITICAL_EXIT();
395
396 return task;
397 }
398
/**
 * Internal suspend: move a task into the suspended variant of its
 * current state (K_SUSPENDED / K_SLEEP_SUSPENDED / K_PEND_SUSPENDED),
 * or bump the nesting count if it is already suspended.
 *
 * @param task  task to suspend (caller guarantees non-NULL)
 *
 * @return RHINO_SUCCESS, RHINO_TRY_AGAIN (SMP: task is mid-execution on
 *         another CPU — presumably what cur_exc flags; confirm),
 *         RHINO_SCHED_DISABLE (self-suspend with scheduler locked),
 *         RHINO_SUSPENDED_COUNT_OVF, or RHINO_INV_TASK_STATE.
 */
kstat_t task_suspend(ktask_t *task)
{
    CPSR_ALLOC();
    uint8_t cur_cpu_num;

    RHINO_CRITICAL_ENTER();

    cur_cpu_num = cpu_cur_get();
#if (RHINO_CONFIG_CPU_NUM > 1)
    /* NOTE(review): cur_exc appears to mean "currently executing on its
       own CPU"; suspending such a remote task is deferred to the caller */
    if (task->cpu_num != cur_cpu_num) {
        if (task->cur_exc == 1) {
            RHINO_CRITICAL_EXIT();
            return RHINO_TRY_AGAIN;
        }
    }
#endif

    /* a task cannot suspend itself while the scheduler is locked, because
       it could never switch away */
    if (task == g_active_task[cur_cpu_num]) {
        if (g_sched_lock[cur_cpu_num] > 0u) {
            RHINO_CRITICAL_EXIT();
            return RHINO_SCHED_DISABLE;
        }
    }

    switch (task->task_state) {
        case K_RDY:
            task->suspend_count = 1u;
            task->task_state = K_SUSPENDED;
            ready_list_rm(&g_ready_queue, task);
            break;
        case K_SLEEP:
            /* keep the tick-list timeout armed; only the state changes */
            task->suspend_count = 1u;
            task->task_state = K_SLEEP_SUSPENDED;
            break;
        case K_PEND:
            task->suspend_count = 1u;
            task->task_state = K_PEND_SUSPENDED;
            break;
        case K_SUSPENDED:
        case K_SLEEP_SUSPENDED:
        case K_PEND_SUSPENDED:
            /* already suspended: nest, guarding against counter wrap */
            if (task->suspend_count == (suspend_nested_t) -1) {
                RHINO_CRITICAL_EXIT();
                return RHINO_SUSPENDED_COUNT_OVF;
            }

            task->suspend_count++;
            break;
        case K_SEED:
        default:
            RHINO_CRITICAL_EXIT();
            return RHINO_INV_TASK_STATE;
    }

    TRACE_TASK_SUSPEND(g_active_task[cur_cpu_num], task);

    /* reschedule in case we just suspended the running task */
    RHINO_CRITICAL_EXIT_SCHED();

    return RHINO_SUCCESS;
}
459
krhino_task_suspend(ktask_t * task)460 kstat_t krhino_task_suspend(ktask_t *task)
461 {
462 if (task == NULL) {
463 return RHINO_NULL_PTR;
464 }
465
466 if (task->prio == RHINO_IDLE_PRI) {
467 return RHINO_TASK_SUSPEND_NOT_ALLOWED;
468 }
469
470 return task_suspend(task);
471 }
472
/**
 * Internal resume: decrement the suspend nesting count and, when it
 * reaches zero, restore the pre-suspend state (K_RDY / K_SLEEP / K_PEND).
 *
 * @param task  task to resume (caller guarantees non-NULL)
 *
 * @return RHINO_SUCCESS, RHINO_TASK_NOT_SUSPENDED if the task is not in
 *         a suspended state, or RHINO_INV_TASK_STATE otherwise.
 */
kstat_t task_resume(ktask_t *task)
{
    CPSR_ALLOC();
    RHINO_CRITICAL_ENTER();

    switch (task->task_state) {
        case K_RDY:
        case K_SLEEP:
        case K_PEND:
            RHINO_CRITICAL_EXIT();
            return RHINO_TASK_NOT_SUSPENDED;
        case K_SUSPENDED:
            task->suspend_count--;

            if (task->suspend_count == 0u) {
                /* Make task ready */
                task->task_state = K_RDY;
                ready_list_add(&g_ready_queue, task);
            }

            break;
        case K_SLEEP_SUSPENDED:
            /* back to plain sleeping; its tick-list entry is still armed */
            task->suspend_count--;

            if (task->suspend_count == 0u) {
                task->task_state = K_SLEEP;
            }

            break;
        case K_PEND_SUSPENDED:
            /* back to pending on its blocking object */
            task->suspend_count--;

            if (task->suspend_count == 0u) {
                task->task_state = K_PEND;
            }

            break;
        case K_SEED:
        default:
            RHINO_CRITICAL_EXIT();
            return RHINO_INV_TASK_STATE;
    }

    TRACE_TASK_RESUME(g_active_task[cpu_cur_get()], task);

    /* reschedule: the resumed task may outrank the current one */
    RHINO_CRITICAL_EXIT_SCHED();

    return RHINO_SUCCESS;
}
522
/**
 * Public resume wrapper: rejects NULL (via NULL_PARA_CHK) and delegates
 * to task_resume().
 */
kstat_t krhino_task_resume(ktask_t *task)
{
    NULL_PARA_CHK(task);

    return task_resume(task);
}
529
/**
 * Measure the historical minimum free stack of a task by counting the
 * still-zero words between the overflow-check region and the deepest
 * stack usage (task_create() zero-fills the whole stack).
 *
 * @param task  task to inspect; must not be K_DELETED
 * @param free  out: minimum free stack, in cpu_stack_t words
 *
 * @return RHINO_SUCCESS or RHINO_INV_TASK_STATE / NULL-pointer error
 *
 * NOTE(review): the scan has no explicit bound — it relies on the task
 * having written a non-zero word (frame data or the stack-check magic)
 * before the far end; a fully untouched region past the stack would be
 * overrun. Confirm the magic words are always present in this config.
 */
kstat_t krhino_task_stack_min_free(ktask_t *task, size_t *free)
{
    cpu_stack_t *task_stack;
    size_t free_stk = 0;

    NULL_PARA_CHK(task);
    NULL_PARA_CHK(free);

    if (task->task_state == K_DELETED) {
        return RHINO_INV_TASK_STATE;
    }

#if (RHINO_CONFIG_CPU_STACK_DOWN > 0)
    /* stack grows down: free words sit just above the magic region */
    task_stack = task->task_stack_base + RHINO_CONFIG_STK_CHK_WORDS;
    while (*task_stack++ == 0u) {
        free_stk++;
    }
#else
    /* stack grows up: free words sit just below the magic region */
    task_stack = (cpu_stack_t *)(task->task_stack_base) + task->stack_size
                 - RHINO_CONFIG_STK_CHK_WORDS - 1u;
    while (*task_stack-- == 0u) {
        free_stk++;
    }
#endif

    *free = free_stk;

    return RHINO_SUCCESS;
}
559
/**
 * Internal priority change with mutex priority propagation.
 *
 * Applies new_pri to the task according to its state, and — when the task
 * is pending on a mutex — walks the chain of mutex owners, raising or
 * re-deriving their priorities as needed. The loop terminates when no
 * further owner needs adjustment (task is set to NULL).
 *
 * Caller must hold the kernel critical section.
 *
 * @param task     task whose priority changes (non-NULL)
 * @param new_pri  priority to apply
 *
 * @return RHINO_SUCCESS or RHINO_INV_TASK_STATE (also reported through
 *         k_err_proc()).
 */
kstat_t task_pri_change(ktask_t *task, uint8_t new_pri)
{
    uint8_t old_pri;
    uint8_t task_exec;
    kmutex_t *mutex_tmp;
    ktask_t *mutex_task;

    do {
        if (task->prio != new_pri) {
            switch (task->task_state) {
                case K_RDY:
                    /* a CFS task is not kept on the RT ready queue while
                       executing, so skip the remove/re-add in that case */
                    task_exec = is_task_exec(task);
                    if (task_exec > 0u) {
                        if (task->sched_policy != KSCHED_CFS) {
                            ready_list_rm(&g_ready_queue, task);
                        }
                    } else {
                        ready_list_rm(&g_ready_queue, task);
                    }
#if (RHINO_CONFIG_SCHED_CFS > 0)
                    task_policy_change(task, new_pri);
#endif
                    task->prio = new_pri;

                    /* an executing task re-enters at the head to keep
                       running; a non-executing one goes to the tail */
                    if (task_exec > 0u) {
                        if (task->sched_policy != KSCHED_CFS) {
                            ready_list_add_head(&g_ready_queue, task);
                        }
                    } else {
                        ready_list_add_tail(&g_ready_queue, task);
                    }

                    task = NULL;
                    break;
                case K_SLEEP:
                case K_SUSPENDED:
                case K_SLEEP_SUSPENDED:
#if (RHINO_CONFIG_SCHED_CFS > 0)
                    task_policy_change(task, new_pri);
#endif
                    /* set new task prio */
                    task->prio = new_pri;
                    task = NULL;
                    break;
                case K_PEND:
                case K_PEND_SUSPENDED:
#if (RHINO_CONFIG_SCHED_CFS > 0)
                    task_policy_change(task, new_pri);
#endif
                    old_pri = task->prio;
                    task->prio = new_pri;
                    /* keep the pend list sorted by the new priority */
                    pend_list_reorder(task);

                    if (task->blk_obj->obj_type == RHINO_MUTEX_OBJ_TYPE) {
                        mutex_tmp = (kmutex_t *)(task->blk_obj);
                        mutex_task = mutex_tmp->mutex_task;

                        if (mutex_task->prio > task->prio) {
                            /* since the highest prio of the lock wait task
                               became higher, raise the lock get task prio
                               higher */
                            task = mutex_task;
                        } else if (mutex_task->prio == old_pri) {
                            /* find suitable tcb prio */
                            new_pri = mutex_pri_look(mutex_task, 0);

                            if (new_pri != mutex_task->prio) {
                                /* Change prio of lock get task */
                                task = mutex_task;
                            } else {
                                task = NULL;
                            }
                        } else {
                            task = NULL;
                        }
                    } else {
                        task = NULL;
                    }

                    break;
                default:
                    k_err_proc(RHINO_INV_TASK_STATE);
                    return RHINO_INV_TASK_STATE;
            }
        } else {
            /* priority already matches: nothing to propagate */
            task = NULL;
        }
    } while (task != NULL);

    return RHINO_SUCCESS;
}
651
krhino_task_pri_change(ktask_t * task,uint8_t pri,uint8_t * old_pri)652 kstat_t krhino_task_pri_change(ktask_t *task, uint8_t pri, uint8_t *old_pri)
653 {
654 CPSR_ALLOC();
655 uint8_t pri_limit;
656 kstat_t error;
657
658 NULL_PARA_CHK(task);
659 NULL_PARA_CHK(old_pri);
660
661 /* idle task is not allowed to change prio */
662 if (task->prio >= RHINO_IDLE_PRI) {
663 return RHINO_PRI_CHG_NOT_ALLOWED;
664 }
665
666 /* not allowed change to idle prio */
667 if (pri >= RHINO_IDLE_PRI) {
668 return RHINO_PRI_CHG_NOT_ALLOWED;
669 }
670
671 /* deleted task is not allowed to change prio */
672 if (task->task_state == K_DELETED) {
673 return RHINO_INV_TASK_STATE;
674 }
675
676 RHINO_CRITICAL_ENTER();
677
678 INTRPT_NESTED_LEVEL_CHK();
679
680 /* limit the prio change by mutex at task prio change */
681 pri_limit = mutex_pri_limit(task, pri);
682
683 task->b_prio = pri;
684 /* new pripority may change here */
685 pri = pri_limit;
686 *old_pri = task->prio;
687
688 error = task_pri_change(task, pri);
689
690 if (error != RHINO_SUCCESS) {
691 RHINO_CRITICAL_EXIT();
692 return error;
693 }
694
695 TRACE_TASK_PRI_CHANGE(g_active_task[cpu_cur_get()], task, pri);
696
697 RHINO_CRITICAL_EXIT_SCHED();
698
699 return RHINO_SUCCESS;
700 }
701
/**
 * Abort whatever a task is waiting on (sleep or pend) and make it ready,
 * marking its block state BLK_ABORT so the woken task's wait call returns
 * an abort status. A suspended task is also made ready.
 *
 * @param task  task to wake (non-NULL)
 *
 * @return RHINO_SUCCESS or RHINO_INV_TASK_STATE
 */
kstat_t krhino_task_wait_abort(ktask_t *task)
{
    CPSR_ALLOC();

    NULL_PARA_CHK(task);

    RHINO_CRITICAL_ENTER();

    INTRPT_NESTED_LEVEL_CHK();

    switch (task->task_state) {
        case K_RDY:
            /* already runnable: nothing to abort */
            break;
        case K_SUSPENDED:
            /* change to ready state */
            task->task_state = K_RDY;
            ready_list_add(&g_ready_queue, task);
            break;
        case K_SLEEP:
        case K_SLEEP_SUSPENDED:
            /* change to ready state */
            tick_list_rm(task);
            ready_list_add(&g_ready_queue, task);
            task->task_state = K_RDY;
            task->blk_state = BLK_ABORT;
            break;
        case K_PEND_SUSPENDED:
        case K_PEND:
            /* remove task on the tick list because task is woken up */
            tick_list_rm(task);
            /* remove task on the block list because task is woken up */
            klist_rm(&task->task_list);
            /* add to the ready list again */
            ready_list_add(&g_ready_queue, task);
            task->task_state = K_RDY;
            task->blk_state = BLK_ABORT;

            /* undo any priority inheritance granted while pending */
            mutex_task_pri_reset(task);
            task->blk_obj = NULL;

            break;
        default:
            RHINO_CRITICAL_EXIT();
            return RHINO_INV_TASK_STATE;
    }

#if (RHINO_CONFIG_USER_HOOK > 0)
    krhino_task_abort_hook(task);
#endif

    TRACE_TASK_WAIT_ABORT(g_active_task[cpu_cur_get()], task);

    /* reschedule: the woken task may outrank the current one */
    RHINO_CRITICAL_EXIT_SCHED();

    return RHINO_SUCCESS;
}
758
759 #if (RHINO_CONFIG_TASK_DEL > 0)
/* Release every mutex held by a dying task. For each mutex: if waiters
 * exist, wake the first one, make it the new owner, and splice the mutex
 * into the new owner's held-mutex list; otherwise just clear the owner. */
static void task_mutex_free(ktask_t *task)
{
    kmutex_t *mutex;
    kmutex_t *next_mutex;
    ktask_t *new_owner;
    klist_t *waiters;

    for (mutex = task->mutex_list; mutex != NULL; mutex = next_mutex) {
        /* remember the next link before this mutex is re-threaded */
        next_mutex = mutex->mutex_list;

        waiters = &mutex->blk_obj.blk_list;

        if (is_klist_empty(waiters)) {
            /* no wait task */
            mutex->mutex_task = NULL;
            continue;
        }

        new_owner = krhino_list_entry(waiters->next, ktask_t, task_list);

        /* wakeup wait task */
        pend_task_wakeup(new_owner);

        /* change mutex get task */
        mutex->mutex_task = new_owner;
        mutex->mutex_list = new_owner->mutex_list;
        new_owner->mutex_list = mutex;
    }
}
790
/**
 * Delete a statically allocated task (task == NULL deletes the caller).
 *
 * Releases held mutexes, detaches the task from the ready/tick/pend
 * lists according to its state, and marks it K_DELETED. The idle task
 * and dynamically allocated tasks are refused.
 *
 * @param task  task to delete, or NULL for the calling task
 *
 * @return RHINO_SUCCESS or a specific error code
 */
kstat_t krhino_task_del(ktask_t *task)
{
    CPSR_ALLOC();
    uint8_t cur_cpu_num;
#if (RHINO_CONFIG_USER_HOOK > 0)
    res_free_t *res_free;
#endif

#if (RHINO_CONFIG_NEWLIBC_REENT > 0)
    /* release the newlib reentrancy context with the scheduler merely
       disabled (not interrupts masked), since it may flush I/O and take
       a long time */
    krhino_sched_disable();
    if (task == NULL) {
        cur_cpu_num = cpu_cur_get();
        task = g_active_task[cur_cpu_num];
    }
    if (task->newlibc_reent != NULL) {
        _reclaim_reent(task->newlibc_reent);
        krhino_mm_free(task->newlibc_reent);
        task->newlibc_reent = NULL;
    }
    krhino_sched_enable();
#endif

    RHINO_CRITICAL_ENTER();

    cur_cpu_num = cpu_cur_get();
    INTRPT_NESTED_LEVEL_CHK();

    if (task == NULL) {
        task = g_active_task[cur_cpu_num];
    }

    if (task->prio == RHINO_IDLE_PRI) {
        RHINO_CRITICAL_EXIT();
        return RHINO_TASK_DEL_NOT_ALLOWED;
    }

    /* heap-allocated tasks must go through krhino_task_dyn_del() */
    if (task->mm_alloc_flag != K_OBJ_STATIC_ALLOC) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_DEL_ERR;
    }

#if (RHINO_CONFIG_CPU_NUM > 1)
    /* NOTE(review): a task executing on another CPU (cur_exc — presumably
       "currently executing") is queued for deferred deletion instead */
    if (task->cpu_num != cur_cpu_num) {
        if (task->cur_exc == 1) {
            klist_insert(&g_task_del_head, &task->task_del_item);
            RHINO_CRITICAL_EXIT();
            return RHINO_SUCCESS;
        }
    }
#endif

    /* self-delete is impossible while the scheduler is locked: the task
       could never switch away */
    if (task == g_active_task[cpu_cur_get()]) {
        if (g_sched_lock[cpu_cur_get()] > 0u) {
            RHINO_CRITICAL_EXIT();
            return RHINO_SCHED_DISABLE;
        }
    }

    /* free all the mutex which task hold */
    task_mutex_free(task);

    switch (task->task_state) {
        case K_RDY:
            ready_list_rm(&g_ready_queue, task);
            task->task_state = K_DELETED;
            break;
        case K_SUSPENDED:
            task->task_state = K_DELETED;
            break;
        case K_SLEEP:
        case K_SLEEP_SUSPENDED:
            tick_list_rm(task);
            task->task_state = K_DELETED;
            break;
        case K_PEND:
        case K_PEND_SUSPENDED:
            tick_list_rm(task);
            klist_rm(&task->task_list);
            task->task_state = K_DELETED;

            /* undo any priority inheritance the pend had caused */
            mutex_task_pri_reset(task);
            break;
        default:
            RHINO_CRITICAL_EXIT();
            return RHINO_INV_TASK_STATE;
    }

#if (RHINO_CONFIG_KOBJ_LIST > 0)
    klist_rm(&task->task_stats_item);
#endif

    TRACE_TASK_DEL(g_active_task[cur_cpu_num], task);

#if (RHINO_CONFIG_USER_HOOK > 0)
    /* the res_free record lives inside the task's own (static) stack,
       just past / before the stack-check magic words */
#if (RHINO_CONFIG_CPU_STACK_DOWN > 0)
    res_free = (res_free_t *)(task->task_stack_base + RHINO_CONFIG_STK_CHK_WORDS);
#else
    res_free = (res_free_t *)(task->task_stack_base + task->stack_size -
                              (sizeof(res_free_t) / sizeof(cpu_stack_t)) - RHINO_CONFIG_STK_CHK_WORDS);
#endif
    res_free->cnt = 0;
    krhino_task_del_hook(task, res_free);
#endif

    RHINO_CRITICAL_EXIT_SCHED();

    return RHINO_SUCCESS;
}
901
902 #if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
/**
 * Delete a dynamically allocated task (task == NULL deletes the caller).
 *
 * Queues the task's stack and control block on g_res_list and signals
 * g_res_sem so a reclaiming task frees them later — the memory cannot be
 * freed here because the task (possibly the caller) may still be running
 * on this stack until the final reschedule.
 *
 * @param task  task to delete, or NULL for the calling task
 *
 * @return RHINO_SUCCESS or a specific error code
 */
kstat_t krhino_task_dyn_del(ktask_t *task)
{
    CPSR_ALLOC();
    kstat_t ret;
    uint8_t cur_cpu_num;
    res_free_t *res_free;

#if (RHINO_CONFIG_NEWLIBC_REENT > 0)
    /* release the newlib reentrancy context with only the scheduler
       disabled, since reclaiming it may flush I/O and take a long time */
    krhino_sched_disable();
    if (task == NULL) {
        cur_cpu_num = cpu_cur_get();
        task = g_active_task[cur_cpu_num];
    }
    if (task->newlibc_reent != NULL) {
        _reclaim_reent(task->newlibc_reent);
        krhino_mm_free(task->newlibc_reent);
        task->newlibc_reent = NULL;
    }
    krhino_sched_enable();
#endif

    RHINO_CRITICAL_ENTER();

    cur_cpu_num = cpu_cur_get();

    INTRPT_NESTED_LEVEL_CHK();

    if (task == NULL) {
        task = g_active_task[cur_cpu_num];
    }

    if (task->prio == RHINO_IDLE_PRI) {
        RHINO_CRITICAL_EXIT();

        return RHINO_TASK_DEL_NOT_ALLOWED;
    }

    /* statically allocated tasks must go through krhino_task_del() */
    if (task->mm_alloc_flag != K_OBJ_DYN_ALLOC) {
        RHINO_CRITICAL_EXIT();

        return RHINO_KOBJ_DEL_ERR;
    }

#if (RHINO_CONFIG_CPU_NUM > 1)
    /* NOTE(review): a task executing on another CPU is queued for
       deferred deletion instead (cur_exc — presumably "currently
       executing"; confirm) */
    if (task->cpu_num != cur_cpu_num) {
        if (task->cur_exc == 1) {
            klist_insert(&g_task_del_head, &task->task_del_item);
            RHINO_CRITICAL_EXIT();
            return RHINO_SUCCESS;
        }
    }
#endif

    /* self-delete is impossible while the scheduler is locked */
    if (task == g_active_task[cpu_cur_get()]) {
        if (g_sched_lock[cpu_cur_get()] > 0u) {
            RHINO_CRITICAL_EXIT();
            return RHINO_SCHED_DISABLE;
        }
    }

    if (task->task_state == K_DELETED) {
        RHINO_CRITICAL_EXIT();
        return RHINO_INV_TASK_STATE;
    }

    /* the res_free record is built inside the dying task's own stack,
       just past / before the stack-check magic words */
#if (RHINO_CONFIG_CPU_STACK_DOWN > 0)
    res_free = (res_free_t *)(task->task_stack_base + RHINO_CONFIG_STK_CHK_WORDS);
#else
    res_free = (res_free_t *)(task->task_stack_base + task->stack_size -
                              (sizeof(res_free_t) / sizeof(cpu_stack_t)) - RHINO_CONFIG_STK_CHK_WORDS);
#endif
    res_free->cnt = 0;
    /* hold off the scheduler while handing the resources to the reclaim
       task, so the sem give cannot switch us away mid-update */
    g_sched_lock[cpu_cur_get()]++;
    klist_insert(&g_res_list, &res_free->res_list);
    res_free->res[0] = task->task_stack_base;
    res_free->res[1] = task;
    res_free->cnt += 2;
    ret = krhino_sem_give(&g_res_sem);
    g_sched_lock[cpu_cur_get()]--;

    if (ret != RHINO_SUCCESS) {
        RHINO_CRITICAL_EXIT();
        k_err_proc(RHINO_SYS_SP_ERR);
        return ret;
    }

    /* free all the mutex which task hold */
    task_mutex_free(task);

    switch (task->task_state) {
        case K_RDY:
            ready_list_rm(&g_ready_queue, task);
            task->task_state = K_DELETED;
            break;
        case K_SUSPENDED:
            task->task_state = K_DELETED;
            break;
        case K_SLEEP:
        case K_SLEEP_SUSPENDED:
            tick_list_rm(task);
            task->task_state = K_DELETED;
            break;
        case K_PEND:
        case K_PEND_SUSPENDED:
            tick_list_rm(task);
            klist_rm(&task->task_list);
            task->task_state = K_DELETED;
            mutex_task_pri_reset(task);
            break;
        case K_SEED:
        default:
            /* NOTE(review): unlike krhino_task_del(), an unexpected state
               is silently ignored here (the resources are already queued
               for reclaim) — confirm this asymmetry is intentional */
            break;
    }

#if (RHINO_CONFIG_KOBJ_LIST > 0)
    klist_rm(&task->task_stats_item);
#endif

    TRACE_TASK_DEL(g_active_task[cpu_cur_get()], task);

#if (RHINO_CONFIG_USER_HOOK > 0)
    krhino_task_del_hook(task, res_free);
#endif

    RHINO_CRITICAL_EXIT_SCHED();

    return RHINO_SUCCESS;
}
1033 #endif
1034
krhino_task_cancel(ktask_t * task)1035 kstat_t krhino_task_cancel(ktask_t *task)
1036 {
1037 CPSR_ALLOC();
1038 kstat_t ret;
1039
1040 NULL_PARA_CHK(task);
1041
1042 RHINO_CRITICAL_ENTER();
1043 task->cancel = 1u;
1044 ret = krhino_task_wait_abort(task);
1045 RHINO_CRITICAL_EXIT();
1046
1047 return ret;
1048 }
1049
krhino_task_cancel_chk(void)1050 RHINO_BOOL krhino_task_cancel_chk(void)
1051 {
1052 CPSR_ALLOC();
1053 ktask_t *cur_task;
1054 RHINO_BOOL ret;
1055
1056 cur_task = krhino_cur_task_get();
1057 RHINO_CRITICAL_ENTER();
1058 if (cur_task->cancel == 1u) {
1059 ret = RHINO_TRUE;
1060 } else {
1061 ret = RHINO_FALSE;
1062 }
1063 RHINO_CRITICAL_EXIT();
1064
1065 return ret;
1066 }
1067 #endif
1068
1069 #if (RHINO_CONFIG_SCHED_RR > 0)
krhino_task_time_slice_set(ktask_t * task,size_t slice)1070 kstat_t krhino_task_time_slice_set(ktask_t *task, size_t slice)
1071 {
1072 CPSR_ALLOC();
1073
1074 NULL_PARA_CHK(task);
1075
1076 RHINO_CRITICAL_ENTER();
1077
1078 INTRPT_NESTED_LEVEL_CHK();
1079
1080 if (slice > 0u) {
1081 /* assign the new time slice */
1082 task->time_total = slice;
1083 } else {
1084 /* assign the default time slice */
1085 task->time_total = RHINO_CONFIG_TIME_SLICE_DEFAULT;
1086 }
1087
1088 task->time_slice = task->time_total;
1089
1090 RHINO_CRITICAL_EXIT();
1091
1092 return RHINO_SUCCESS;
1093 }
1094
krhino_sched_param_set(ktask_t * task,uint8_t policy,uint8_t pri)1095 kstat_t krhino_sched_param_set(ktask_t *task, uint8_t policy, uint8_t pri)
1096 {
1097 CPSR_ALLOC();
1098 uint8_t old_pri;
1099 kstat_t ret;
1100
1101 (void)ret;
1102 NULL_PARA_CHK(task);
1103
1104 if ((policy != KSCHED_FIFO) && (policy != KSCHED_RR) && (policy != KSCHED_CFS)) {
1105 return RHINO_INV_SCHED_WAY;
1106 }
1107 #if (RHINO_CONFIG_SCHED_CFS > 0)
1108 ret = task_policy_check(pri, policy);
1109 if (ret != RHINO_SUCCESS) {
1110 return ret;
1111 }
1112 #endif
1113
1114 krhino_sched_disable();
1115 RHINO_CRITICAL_ENTER();
1116 krhino_task_pri_change(task, pri, &old_pri);
1117 task->sched_policy = policy;
1118 RHINO_CRITICAL_EXIT();
1119 krhino_sched_enable();
1120
1121 return RHINO_SUCCESS;
1122 }
1123
1124
/**
 * Change only the scheduling policy, keeping the task's current priority
 * (delegates to krhino_sched_param_set()).
 */
kstat_t krhino_sched_policy_set(ktask_t *task, uint8_t policy)
{
    NULL_PARA_CHK(task);
    return krhino_sched_param_set(task, policy, task->prio);
}
1130
1131
/**
 * Read a task's scheduling policy.
 *
 * @param task    task to query
 * @param policy  out: KSCHED_FIFO / KSCHED_RR / KSCHED_CFS
 *
 * @return RHINO_SUCCESS or a NULL-pointer error
 */
kstat_t krhino_sched_policy_get(ktask_t *task, uint8_t *policy)
{
    CPSR_ALLOC();

    NULL_PARA_CHK(task);
    NULL_PARA_CHK(policy);

    RHINO_CRITICAL_ENTER();

    INTRPT_NESTED_LEVEL_CHK();

    *policy = task->sched_policy;
    RHINO_CRITICAL_EXIT();

    return RHINO_SUCCESS;
}
1148 #endif
1149
1150 #if (RHINO_CONFIG_TASK_INFO > 0)
/**
 * Store an opaque user pointer in a task's per-task info slot.
 *
 * @param task  target task
 * @param idx   slot index, must be < RHINO_CONFIG_TASK_INFO_NUM
 * @param info  value to store (not interpreted by the kernel)
 *
 * @return RHINO_SUCCESS, RHINO_INV_PARAM or a NULL-pointer error
 */
kstat_t krhino_task_info_set(ktask_t *task, size_t idx, void *info)
{
    CPSR_ALLOC();

    NULL_PARA_CHK(task);

    if (idx >= RHINO_CONFIG_TASK_INFO_NUM) {
        return RHINO_INV_PARAM;
    }

    /* a single pointer store; interrupt masking is sufficient */
    RHINO_CPU_INTRPT_DISABLE();
    task->user_info[idx] = info;
    RHINO_CPU_INTRPT_ENABLE();

    return RHINO_SUCCESS;
}
1167
/**
 * Read an opaque user pointer from a task's per-task info slot.
 *
 * @param task  task to query
 * @param idx   slot index, must be < RHINO_CONFIG_TASK_INFO_NUM
 * @param info  out: the stored pointer
 *
 * @return RHINO_SUCCESS, RHINO_INV_PARAM or a NULL-pointer error
 */
kstat_t krhino_task_info_get(ktask_t *task, size_t idx, void **info)
{
    NULL_PARA_CHK(task);
    NULL_PARA_CHK(info);

    if (idx >= RHINO_CONFIG_TASK_INFO_NUM) {
        return RHINO_INV_PARAM;
    }

    /* single aligned pointer read; done without locking */
    *info = task->user_info[idx];

    return RHINO_SUCCESS;
}
1181 #endif
1182
/**
 * Called when a task's entry function returns: delete the calling task
 * via the matching deletion routine (dynamic vs static allocation).
 * When task deletion is configured out, the task can never be reclaimed,
 * so it is parked in an endless sleep loop instead.
 */
void krhino_task_deathbed(void)
{
#if (RHINO_CONFIG_TASK_DEL > 0)
    ktask_t *task;

    task = krhino_cur_task_get();

    if (task->mm_alloc_flag == K_OBJ_DYN_ALLOC) {
        /* del my self*/
#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
        krhino_task_dyn_del(NULL);
#endif
    } else {
        krhino_task_del(NULL);
    }
#else

    /* deletion unavailable: park forever */
    while (1) {
        krhino_task_sleep(RHINO_CONFIG_TICKS_PER_SECOND * 10);
    }
#endif
}
1205
krhino_task_find(name_t * name)1206 ktask_t *krhino_task_find(name_t *name)
1207 {
1208 CPSR_ALLOC();
1209 klist_t *listnode;
1210 ktask_t *task;
1211
1212 RHINO_CRITICAL_ENTER();
1213 #if (RHINO_CONFIG_KOBJ_LIST > 0)
1214 for (listnode = g_kobj_list.task_head.next;
1215 listnode != &g_kobj_list.task_head; listnode = listnode->next) {
1216 task = krhino_list_entry(listnode, ktask_t, task_stats_item);
1217 if (0 == strcmp(name, task->task_name)) {
1218 RHINO_CRITICAL_EXIT();
1219 return task;
1220 }
1221 }
1222 #endif
1223 RHINO_CRITICAL_EXIT();
1224
1225 return NULL;
1226 }
1227