/*
 * Copyright (C) 2017-2019 Alibaba Group Holding Limited
 */

/******************************************************************************
 * @file csi_rhino.c
 * @brief CSI kernel adapter for the Rhino RTOS
 * @version V1.0
 * @date 20. July 2016
 ******************************************************************************/

#include <csi_kernel.h>
#include <k_api.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <csi_config.h>
#include <soc.h>

extern uint32_t dump_mmleak(void);

#define AUTORUN 1
#define TMR_ONE_SHOT_DLY 10
#define TMR_PERIODIC_PERIOD 10
#define RHINO_OS_MS_PERIOD_TICK (1000 / RHINO_CONFIG_TICKS_PER_SECOND)

k_status_t csi_kernel_init(void)
{
    kstat_t ret = krhino_init();

#ifdef CONFIG_KERNEL_PWR_MGMT
    extern void cpu_pwrmgmt_init(void);
    cpu_pwrmgmt_init();
#endif

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }
}

k_status_t csi_kernel_start(void)
{
    kstat_t ret = krhino_start();

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }
}

k_sched_stat_t csi_kernel_get_stat(void)
{
    kstat_t get = g_sys_stat;

    if (get == RHINO_STOPPED) {
        return KSCHED_ST_INACTIVE;
    } else if (get == RHINO_RUNNING) {
        return KSCHED_ST_RUNNING;
    }

    return KSCHED_ST_ERROR;
}

int32_t csi_kernel_sched_lock(void)
{
    return -EOPNOTSUPP;
}

int32_t csi_kernel_sched_unlock(void)
{
    return -EOPNOTSUPP;
}

int32_t csi_kernel_sched_restore_lock(int32_t lock)
{
    return -EOPNOTSUPP;
}

uint32_t csi_kernel_sched_suspend(void)
{
    if (g_sys_stat != RHINO_RUNNING) {
        return 0;
    }

    krhino_sched_disable();
    return 0;
}

void csi_kernel_sched_resume(uint32_t sleep_ticks)
{
    if (g_sys_stat != RHINO_RUNNING) {
        return;
    }

    krhino_sched_enable();
}

k_status_t csi_kernel_task_new(k_task_entry_t task, const char *name, void *arg,
                               k_priority_t prio, uint32_t time_quanta,
                               void *stack, uint32_t stack_size, k_task_handle_t *task_handle)
{
    if ((task_handle == NULL) || (stack_size % sizeof(cpu_stack_t) != 0)
        || ((stack_size == 0) && (stack == NULL))
        || prio <= KPRIO_IDLE || prio > KPRIO_REALTIME7) {
        return -EINVAL;
    }

    k_status_t rc = -1;
#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)

    uint8_t prio_trans = RHINO_CONFIG_USER_PRI_MAX - prio;

    csi_kernel_sched_suspend();

    kstat_t ret;

    if (name) {
        ret = krhino_task_dyn_create((ktask_t **)task_handle, name, arg, prio_trans, time_quanta, stack_size / sizeof(cpu_stack_t), task, AUTORUN);
    } else {
        ret = krhino_task_dyn_create((ktask_t **)task_handle, "user_task", arg, prio_trans, time_quanta, stack_size / sizeof(cpu_stack_t), task, AUTORUN);
    }

    if (ret == RHINO_SUCCESS) {
        csi_kernel_sched_resume(0);
        return 0;
    } else {
        csi_kernel_sched_resume(0);
        return -EPERM;
    }

#endif
    return rc;
}
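
/*
 * Usage sketch (illustrative only, not part of the adapter): create a
 * self-starting dynamic task with a 4 KiB stack. Any priority strictly above
 * KPRIO_IDLE and up to KPRIO_REALTIME7 is accepted; `demo_entry` and `th` are
 * hypothetical caller-side names.
 *
 *     k_task_handle_t th;
 *     csi_kernel_task_new(demo_entry, "demo", NULL, KPRIO_REALTIME7, 0,
 *                         NULL, 4096, &th);
 */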

k_status_t csi_kernel_task_del(k_task_handle_t task_handle)
{
    if (task_handle == NULL) {
        return -EINVAL;
    }

    k_status_t rc = -1;
#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)

    kstat_t ret = krhino_task_dyn_del(task_handle);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }

#endif
    return rc;
}

k_task_handle_t csi_kernel_task_get_cur(void)
{
    ktask_t *ret;
    ret = g_active_task[cpu_cur_get()];
    return (k_task_handle_t)ret;
}

k_task_stat_t csi_kernel_task_get_stat(k_task_handle_t task_handle)
{
    if (task_handle == NULL) {
        return KTASK_ST_ERROR;
    }

    if (csi_kernel_task_get_cur() == task_handle) {
        return KTASK_ST_RUNNING;
    }

    task_stat_t get;
    ktask_t *handle = (ktask_t *)task_handle;
    get = handle->task_state;

    switch (get) {
        case K_PEND:
        case K_SUSPENDED:
        case K_PEND_SUSPENDED:
        case K_SLEEP:
        case K_SLEEP_SUSPENDED:
            return KTASK_ST_BLOCKED;

        case K_DELETED:
            return KTASK_ST_TERMINATED;

        case K_RDY:
            return KTASK_ST_READY;

        default:
            return KTASK_ST_ERROR;
    }
}

k_status_t csi_kernel_task_set_prio(k_task_handle_t task_handle, k_priority_t priority)
{
    if (task_handle == NULL || priority <= KPRIO_IDLE || priority > KPRIO_REALTIME7) {
        return -EINVAL;
    }

    uint8_t prio = RHINO_CONFIG_USER_PRI_MAX - priority;
    uint8_t old;
    kstat_t ret = krhino_task_pri_change(task_handle, prio, &old);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }
}

k_priority_t csi_kernel_task_get_prio(k_task_handle_t task_handle)
{
    if (task_handle == NULL) {
        return KPRIO_ERROR;
    }

    ktask_t *handle = (ktask_t *)task_handle;
    uint8_t ret = handle->prio;

    if (ret <= RHINO_CONFIG_USER_PRI_MAX) {
        ret = RHINO_CONFIG_USER_PRI_MAX - ret;
    } else {
        ret = RHINO_CONFIG_PRI_MAX - ret;
    }

    return ret;
}

const char *csi_kernel_task_get_name(k_task_handle_t task_handle)
{
    if (task_handle == NULL) {
        return NULL;
    }

    ktask_t *handle = (ktask_t *)task_handle;

    return handle->task_name;
}

k_status_t csi_kernel_task_suspend(k_task_handle_t task_handle)
{
    if (task_handle == NULL) {
        return -EINVAL;
    }

    kstat_t ret = krhino_task_suspend(task_handle);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }
}

k_status_t csi_kernel_task_resume(k_task_handle_t task_handle)
{
    if (task_handle == NULL) {
        return -EINVAL;
    }

    kstat_t ret = krhino_task_resume(task_handle);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }
}

k_status_t csi_kernel_task_terminate(k_task_handle_t task_handle)
{
    return csi_kernel_task_del(task_handle);
}

void csi_kernel_task_exit(void)
{
    csi_kernel_task_del(csi_kernel_task_get_cur());
}

k_status_t csi_kernel_task_yield(void)
{
    kstat_t ret = krhino_task_yield();

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }
}

uint32_t csi_kernel_task_get_count(void)
{
#if (RHINO_CONFIG_SYSTEM_STATS > 0)
    klist_t *taskhead;
    klist_t *taskend;
    klist_t *tmp;

    taskhead = &g_kobj_list.task_head;
    taskend = taskhead;

    uint32_t ret = 0;

    for (tmp = taskhead->next; tmp != taskend; tmp = tmp->next) {
        ret++;
    }

    return ret;
#else
    return 0;
#endif
}

uint32_t csi_kernel_task_get_stack_size(k_task_handle_t task_handle)
{
    if (task_handle == NULL) {
        return 0;
    }

    ktask_t *handle = (ktask_t *)task_handle;
    return sizeof(cpu_stack_t) * handle->stack_size;
}

uint32_t csi_kernel_task_get_stack_space(k_task_handle_t task_handle)
{
    if (task_handle == NULL) {
        return 0;
    }

    size_t stack_free;
    kstat_t ret = krhino_task_stack_min_free(task_handle, &stack_free);

    if (ret == RHINO_SUCCESS) {
        return (uint32_t)(sizeof(cpu_stack_t) * stack_free);
    } else {
        return 0;
    }
}

uint32_t csi_kernel_task_list(k_task_handle_t *task_array, uint32_t array_items)
{
    if (task_array == NULL || array_items == 0) {
        return 0;
    }

    uint32_t real_tsk_num = 0;
#if (RHINO_CONFIG_SYSTEM_STATS > 0)
    klist_t *taskhead;
    klist_t *taskend;
    klist_t *tmp;
    ktask_t *task;

    k_task_handle_t *tk_tmp = task_array;
    taskhead = &g_kobj_list.task_head;
    taskend = taskhead;

#ifdef CONFIG_BACKTRACE
    size_t task_free;
    size_t irq_flags;
#endif
#ifdef CONFIG_STACK_GUARD
    int stack_flags;
#endif

    for (tmp = taskhead->next; tmp != taskend; tmp = tmp->next) {
        real_tsk_num++;
        task = krhino_list_entry(tmp, ktask_t, task_stats_item);
#ifdef CONFIG_BACKTRACE
        krhino_task_stack_min_free(task, &task_free);
        printf("\n%s:\n\t state %d, pri %d, stack: total %u, free %u\n",
               task->task_name, task->task_state, task->prio,
               (uint32_t)(sizeof(cpu_stack_t) * task->stack_size),
               (uint32_t)(sizeof(cpu_stack_t) * task_free));
#endif
    }

    if (array_items < real_tsk_num) {
        real_tsk_num = array_items;
    }

    /* Copy at most real_tsk_num handles so task_array is never overrun. */
    uint32_t copied = 0;

    for (tmp = taskhead->next; tmp != taskend && copied < real_tsk_num; tmp = tmp->next) {
        task = krhino_list_entry(tmp, ktask_t, task_stats_item);
        *tk_tmp = task;
        tk_tmp++;
        copied++;
    }

#ifdef CONFIG_BACKTRACE
    irq_flags = cpu_intrpt_save();

#ifdef CONFIG_STACK_GUARD
    extern int stack_guard_save(void);
    stack_flags = stack_guard_save();
#endif

    for (tmp = taskhead->next; tmp != taskend; tmp = tmp->next) {
        task = krhino_list_entry(tmp, ktask_t, task_stats_item);
        krhino_task_stack_min_free(task, &task_free);

        printf("\n%s:\n", task->task_name);
        extern int csky_task_backtrace(void *stack, char *buf, int len);
        csky_task_backtrace(task->task_stack, NULL, 0);
    }

#ifdef CONFIG_STACK_GUARD
    extern void stack_guard_restore(int value);
    stack_guard_restore(stack_flags);
#endif

    cpu_intrpt_restore(irq_flags);

#endif /* CONFIG_BACKTRACE */
#endif /* RHINO_CONFIG_SYSTEM_STATS */

    return real_tsk_num;
}

k_status_t csi_kernel_intrpt_enter(void)
{
    kstat_t ret = krhino_intrpt_enter();

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }
}

k_status_t csi_kernel_intrpt_exit(void)
{
    krhino_intrpt_exit();
    return 0;
}

k_status_t csi_kernel_delay(uint32_t ticks)
{
    kstat_t ret = krhino_task_sleep(ticks);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }
}

k_status_t csi_kernel_delay_until(uint64_t ticks)
{
    return -EOPNOTSUPP;
}

uint64_t csi_kernel_tick2ms(uint32_t ticks)
{
    return ((uint64_t)ticks * RHINO_OS_MS_PERIOD_TICK);
}

uint64_t csi_kernel_ms2tick(uint32_t ms)
{
    if (ms < RHINO_OS_MS_PERIOD_TICK) {
        return 0;
    }

    return (((uint64_t)ms) / RHINO_OS_MS_PERIOD_TICK);
}

k_status_t csi_kernel_delay_ms(uint32_t ms)
{
    uint32_t ms_get = ms;

    if ((ms < RHINO_OS_MS_PERIOD_TICK) && (ms != 0)) {
        ms_get = RHINO_OS_MS_PERIOD_TICK;
    }

    uint64_t ticks = csi_kernel_ms2tick(ms_get);
    kstat_t ret = krhino_task_sleep(ticks);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }
}

uint64_t csi_kernel_get_ticks(void)
{
    return (uint64_t)krhino_sys_tick_get();
}

uint64_t csi_kernel_suspend_tick(void)
{
    // return next_task_wake_tick_get();
    return 0;
}

void csi_kernel_update_tick(uint32_t ms)
{
    extern uint64_t g_sys_tick_count;
    uint32_t ticks = ms * RHINO_CONFIG_TICKS_PER_SECOND / 1000;

    CPSR_ALLOC();

    RHINO_CPU_INTRPT_DISABLE();
    g_sys_tick_count += ticks;

    RHINO_CPU_INTRPT_ENABLE();
}

uint32_t csi_kernel_get_tick_freq(void)
{
    return RHINO_CONFIG_TICKS_PER_SECOND;
}

uint32_t csi_kernel_get_systimer_freq(void)
{
    return drv_get_sys_freq();
}

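/*
 * Timer adaptation: a Rhino timer callback receives (timer, arg), while the CSI
 * timer callback takes a single user argument. The user callback and its
 * argument are therefore boxed in a heap-allocated tmr_arg_t, and tmr_adapt_cb
 * unpacks the box and invokes the user callback.
 */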
typedef struct tmr_arg {
    void *arg;
    k_timer_cb_t func;
} tmr_arg_t;

static void tmr_adapt_cb(void *timer, void *arg)
{
    ktimer_t *get = (ktimer_t *)timer;
    tmr_arg_t *arg_above = (tmr_arg_t *)(get->timer_cb_arg);

    if (arg_above->func) {
        arg_above->func(arg_above->arg);
    }
}

k_timer_handle_t csi_kernel_timer_new(k_timer_cb_t func, k_timer_type_t type, void *arg)
{
    if (type < 0 || type > 1 || func == NULL) {
        return NULL;
    }

    tick_t first = TMR_ONE_SHOT_DLY;
    tick_t round;

    if (type == KTIMER_TYPE_ONCE) {
        round = 0;
    } else {
        round = TMR_PERIODIC_PERIOD;
    }

    tmr_arg_t *get_arg = (tmr_arg_t *)malloc(sizeof(tmr_arg_t));

    if (get_arg == NULL) {
        return NULL;
    }

    ktimer_t *tmr;
    get_arg->arg = arg;
    get_arg->func = func;

#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
    kstat_t ret = krhino_timer_dyn_create((ktimer_t **)&tmr, "UserTmr", (timer_cb_t)tmr_adapt_cb, first, round, get_arg, 0);

    if (ret == RHINO_SUCCESS) {
        return tmr;
    } else {
        free((void *)get_arg);
        return NULL;
    }

#else
    return NULL;
#endif
}
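
/*
 * Usage sketch (illustrative only): a one-shot timer armed for 100 ticks;
 * `on_expire` is a hypothetical caller-side callback.
 *
 *     k_timer_handle_t t = csi_kernel_timer_new(on_expire, KTIMER_TYPE_ONCE, NULL);
 *
 *     if (t != NULL) {
 *         csi_kernel_timer_start(t, 100);
 *     }
 */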

k_status_t csi_kernel_timer_del(k_timer_handle_t timer_handle)
{
    if (timer_handle == NULL) {
        return -EINVAL;
    }

    k_status_t rc = -1;
#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)

    /* Save the boxed callback argument before the timer object is deleted,
       so it is not read after krhino_timer_dyn_del() has released the timer. */
    void *cb_arg = ((ktimer_t *)timer_handle)->timer_cb_arg;

    kstat_t ret = krhino_timer_dyn_del((ktimer_t *)timer_handle);

    if (ret == RHINO_SUCCESS) {
        free(cb_arg);
        return 0;
    } else {
        return -EPERM;
    }

#endif
    return rc;
}

k_status_t csi_kernel_timer_start(k_timer_handle_t timer_handle, uint32_t ticks)
{
    if (timer_handle == NULL || ticks == 0) {
        return -EINVAL;
    }

    tick_t round;
    ktimer_t *handle = (ktimer_t *)timer_handle;

    round = handle->round_ticks;

    tick_t tr;
    tick_t tf = ticks;

    if (round != 0) {
        tr = ticks;
    } else {
        tr = 0;
    }

    kstat_t ret1 = krhino_timer_change(handle, tf, tr);

    if (ret1 == RHINO_SUCCESS) {
        kstat_t ret2 = krhino_timer_start(handle);

        if (ret2 == RHINO_SUCCESS) {
            return 0;
        } else {
            return -EPERM;
        }
    } else {
        return -EPERM;
    }
}

k_status_t csi_kernel_timer_stop(k_timer_handle_t timer_handle)
{
    if (timer_handle == NULL) {
        return -EINVAL;
    }

    ktimer_t *handle = (ktimer_t *)timer_handle;

    kstat_t ret = krhino_timer_stop(handle);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }
}

k_timer_stat_t csi_kernel_timer_get_stat(k_timer_handle_t timer_handle)
{
    if (timer_handle == NULL) {
        return KTIMER_ST_INACTIVE;
    }

    ktimer_t *handle = (ktimer_t *)(timer_handle);
    k_timer_state_t get = handle->timer_state;

    if (get == TIMER_DEACTIVE) {
        return KTIMER_ST_INACTIVE;
    } else {
        return KTIMER_ST_ACTIVE;
    }
}

k_event_handle_t csi_kernel_event_new(void)
{
#if (RHINO_CONFIG_EVENT_FLAG > 0)
#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
    kevent_t *event_handle;
    kstat_t ret = krhino_event_dyn_create((kevent_t **)&event_handle, "UserEvent", 0);

    if (ret == RHINO_SUCCESS) {
        return event_handle;
    } else {
        return NULL;
    }

#else
    return NULL;
#endif

#else
    return NULL;
#endif
}

k_status_t csi_kernel_event_del(k_event_handle_t ev_handle)
{
#if (RHINO_CONFIG_EVENT_FLAG > 0)

    if (ev_handle == NULL) {
        return -EINVAL;
    }

#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
    kstat_t ret = krhino_event_dyn_del(ev_handle);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }

#else
    return -EPERM;
#endif

#else
    return -EOPNOTSUPP;
#endif
}

k_status_t csi_kernel_event_set(k_event_handle_t ev_handle, uint32_t flags, uint32_t *ret_flags)
{
#if (RHINO_CONFIG_EVENT_FLAG > 0)

    if (ev_handle == NULL || ret_flags == NULL) {
        return -EINVAL;
    }

    kstat_t ret = krhino_event_set(ev_handle, flags, RHINO_OR);

    if (ret == RHINO_SUCCESS) {
        kevent_t *handle = (kevent_t *)ev_handle;
        *ret_flags = handle->flags;
        return 0;
    } else {
        return -EPERM;
    }

#else
    return -EOPNOTSUPP;
#endif
}

k_status_t csi_kernel_event_clear(k_event_handle_t ev_handle, uint32_t flags, uint32_t *ret_flags)
{
    return -EOPNOTSUPP;
}

k_status_t csi_kernel_event_get(k_event_handle_t ev_handle, uint32_t *ret_flags)
{
#if (RHINO_CONFIG_EVENT_FLAG > 0)

    if (ev_handle == NULL || ret_flags == NULL) {
        return -EINVAL;
    }

    kevent_t *handle = (kevent_t *)ev_handle;
    *ret_flags = handle->flags;
    return 0;
#else
    return -EOPNOTSUPP;
#endif
}

k_status_t csi_kernel_event_wait(k_event_handle_t ev_handle, uint32_t flags,
                                 k_event_opt_t options, uint8_t clr_on_exit,
                                 uint32_t *actl_flags, int64_t timeout)
{
#if (RHINO_CONFIG_EVENT_FLAG > 0)

    if (ev_handle == NULL || actl_flags == NULL
        || ((clr_on_exit != 0) && (clr_on_exit != 1))) {
        return -EINVAL;
    }

    if (options == KEVENT_OPT_CLR_ANY || options == KEVENT_OPT_CLR_ALL) {
        return -EOPNOTSUPP;
    }

    uint8_t opt = 0;

    if (options == KEVENT_OPT_SET_ANY) {
        if (clr_on_exit == 1) {
            opt = RHINO_OR_CLEAR;
        } else {
            opt = RHINO_OR;
        }
    } else if (options == KEVENT_OPT_SET_ALL) {
        if (clr_on_exit == 1) {
            opt = RHINO_AND_CLEAR;
        } else {
            opt = RHINO_AND;
        }
    }

    tick_t t;

    if (timeout < 0) {
        t = RHINO_WAIT_FOREVER;
    } else {
        t = timeout;
    }

    kstat_t ret = krhino_event_get(ev_handle, flags, opt, actl_flags, t);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else if (ret == RHINO_BLK_TIMEOUT) {
        return -ETIMEDOUT;
    } else {
        return -EPERM;
    }

#else
    return -EOPNOTSUPP;
#endif
}

k_mutex_handle_t csi_kernel_mutex_new(void)
{
#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
    kmutex_t *mutex_handle;
    kstat_t ret = krhino_mutex_dyn_create((kmutex_t **)&mutex_handle, "UserMutex");

    if (ret == RHINO_SUCCESS) {
        return mutex_handle;
    } else {
        return NULL;
    }

#else
    return NULL;
#endif
}

k_status_t csi_kernel_mutex_del(k_mutex_handle_t mutex_handle)
{
    if (mutex_handle == NULL) {
        return -EINVAL;
    }

#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
    kstat_t ret = krhino_mutex_dyn_del(mutex_handle);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }

#else
    return -EPERM;
#endif
}

k_status_t csi_kernel_mutex_lock(k_mutex_handle_t mutex_handle, int64_t timeout)
{
    if (mutex_handle == NULL) {
        return -EINVAL;
    }

    tick_t t;

    if (timeout < 0) {
        t = RHINO_WAIT_FOREVER;
    } else {
        t = timeout;
    }

    kstat_t ret = krhino_mutex_lock(mutex_handle, t);

    if (ret == RHINO_SUCCESS || ret == RHINO_MUTEX_OWNER_NESTED) {
        return 0;
    } else if (ret == RHINO_BLK_TIMEOUT) {
        return -ETIMEDOUT;
    } else {
        return -EPERM;
    }
}

k_status_t csi_kernel_mutex_unlock(k_mutex_handle_t mutex_handle)
{
    if (mutex_handle == NULL) {
        return -EINVAL;
    }

    kstat_t ret = krhino_mutex_unlock(mutex_handle);

    if (ret == RHINO_SUCCESS || ret == RHINO_MUTEX_OWNER_NESTED) {
        return 0;
    } else {
        return -EPERM;
    }
}

k_task_handle_t csi_kernel_mutex_get_owner(k_mutex_handle_t mutex_handle)
{
    if (mutex_handle == NULL) {
        return NULL;
    }

    kmutex_t *handle = (kmutex_t *)mutex_handle;
    return handle->mutex_task;
}

k_sem_handle_t csi_kernel_sem_new(int32_t max_count, int32_t initial_count)
{
    if (max_count <= 0 || initial_count < 0) {
        return NULL;
    }

    if (max_count < initial_count) {
        return NULL;
    }

#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
    ksem_t *sem_handle;
    kstat_t ret = krhino_sem_dyn_create((ksem_t **)&sem_handle, "UserSem", initial_count);

    if (ret == RHINO_SUCCESS) {
        return sem_handle;
    } else {
        return NULL;
    }

#else
    return NULL;
#endif
}

k_status_t csi_kernel_sem_del(k_sem_handle_t sem_handle)
{
    if (sem_handle == NULL) {
        return -EINVAL;
    }

#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
    kstat_t ret = krhino_sem_dyn_del(sem_handle);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }

#else
    return -EPERM;
#endif
}

k_status_t csi_kernel_sem_wait(k_sem_handle_t sem_handle, int64_t timeout)
{
    if (sem_handle == NULL) {
        return -EINVAL;
    }

    tick_t t;

    if (timeout < 0) {
        t = RHINO_WAIT_FOREVER;
    } else {
        t = timeout;
    }

    kstat_t ret = krhino_sem_take(sem_handle, t);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else if (ret == RHINO_BLK_TIMEOUT) {
        return -ETIMEDOUT;
    } else {
        return -EPERM;
    }
}

k_status_t csi_kernel_sem_post(k_sem_handle_t sem_handle)
{
    if (sem_handle == NULL) {
        return -EINVAL;
    }

    kstat_t ret = krhino_sem_give(sem_handle);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }
}

int32_t csi_kernel_sem_get_count(k_sem_handle_t sem_handle)
{
    if (sem_handle == NULL) {
        return -EINVAL;
    }

    sem_count_t cnt;
    kstat_t ret = krhino_sem_count_get(sem_handle, &cnt);

    if (ret == RHINO_SUCCESS) {
        return (int32_t)cnt;
    } else {
        return -EPERM;
    }
}

k_mpool_handle_t csi_kernel_mpool_new(void *p_addr, int32_t block_count, int32_t block_size)
{
    if (p_addr == NULL || block_count < 0 || block_size <= 0 || (block_size % 4 != 0)) {
        return NULL;
    }

#if (RHINO_CONFIG_MM_BLK > 0)
    mblk_pool_t *handle = (mblk_pool_t *)malloc(sizeof(mblk_pool_t));

    if (handle == NULL) {
        return NULL;
    }

    kstat_t ret = krhino_mblk_pool_init(handle, "UserMp", p_addr, block_count * block_size);

    if (ret == RHINO_SUCCESS) {
        return handle;
    } else {
        /* Release the wrapper so a failed init does not leak it. */
        free(handle);
        return NULL;
    }

#else
    return NULL;
#endif
}

k_status_t csi_kernel_mpool_del(k_mpool_handle_t mp_handle)
{
    if (mp_handle == NULL) {
        return -EINVAL;
    }

    memset(mp_handle, 0, sizeof(mblk_pool_t));
    free(mp_handle);
    return 0;
}

void *csi_kernel_mpool_alloc(k_mpool_handle_t mp_handle, uint32_t size)
{
    if (mp_handle == NULL) {
        return NULL;
    }

#if (RHINO_CONFIG_MM_BLK > 0)
    return krhino_mblk_alloc(mp_handle, size);
#else
    return NULL;
#endif
}

k_status_t csi_kernel_mpool_free(k_mpool_handle_t mp_handle, void *block)
{
    if (mp_handle == NULL || block == NULL) {
        return -EINVAL;
    }

#if (RHINO_CONFIG_MM_BLK > 0)
    kstat_t ret = krhino_mblk_free(mp_handle, block);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }

#else
    return -EPERM;
#endif
}

int32_t csi_kernel_mpool_get_count(k_mpool_handle_t mp_handle)
{
    return 0;
}

uint32_t csi_kernel_mpool_get_capacity(k_mpool_handle_t mp_handle)
{
    return 0;
}

uint32_t csi_kernel_mpool_get_block_size(k_mpool_handle_t mp_handle)
{
    return 0;
}

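/*
 * Message-queue adaptation: a Rhino buffer queue only tracks its byte buffer
 * and maximum message size, so the requested message count and size are cached
 * in this wrapper to answer the capacity and message-size queries below.
 */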
typedef struct mq_adapter {
    kbuf_queue_t *buf_q;
    int32_t msg_size;
    int32_t msg_count;
} mq_adapter_t;

k_msgq_handle_t csi_kernel_msgq_new(int32_t msg_count, int32_t msg_size)
{
    if (msg_count <= 0 || msg_size <= 0) {
        return NULL;
    }

#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
    mq_adapter_t *handle = (mq_adapter_t *)malloc(sizeof(mq_adapter_t));

    if (handle == NULL) {
        return NULL;
    }

    kstat_t ret = krhino_buf_queue_dyn_create(&(handle->buf_q), "UserMsgQ", msg_size * (msg_count + 1), msg_size);

    if (ret == RHINO_SUCCESS) {
        handle->msg_count = msg_count;
        handle->msg_size = msg_size;
        return (k_msgq_handle_t)handle;
    } else {
        free(handle);
        return NULL;
    }

#else
    return NULL;
#endif
}
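
/*
 * Usage sketch (illustrative only): a queue of eight 16-byte messages, written
 * without blocking and read with an indefinite wait. `msg` is a hypothetical
 * caller-side buffer.
 *
 *     k_msgq_handle_t q = csi_kernel_msgq_new(8, 16);
 *     char msg[16] = "hello";
 *
 *     if (q != NULL) {
 *         csi_kernel_msgq_put(q, msg, 0, 0);
 *         csi_kernel_msgq_get(q, msg, -1);
 *     }
 */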

k_status_t csi_kernel_msgq_del(k_msgq_handle_t mq_handle)
{
    if (!mq_handle) {
        return -EINVAL;
    }

#if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
    mq_adapter_t *handle = (mq_adapter_t *)mq_handle;
    kstat_t ret = krhino_buf_queue_dyn_del(handle->buf_q);

    if (ret == RHINO_SUCCESS) {
        free(mq_handle);
        return 0;
    } else {
        return -EPERM;
    }

#else
    return -EPERM;
#endif
}

k_status_t csi_kernel_msgq_put(k_msgq_handle_t mq_handle, const void *msg_ptr, uint8_t front_or_back, int64_t timeout)
{
    if ((!mq_handle) || (msg_ptr == NULL) || ((front_or_back != 0) && (front_or_back != 1))) {
        return -EINVAL;
    }

    mq_adapter_t *handle = (mq_adapter_t *)mq_handle;

    /* krhino_buf_queue_send() always appends to the queue, so both
       front_or_back options are handled identically and timeout is unused. */
    kstat_t ret = krhino_buf_queue_send(handle->buf_q, (void *)msg_ptr, handle->msg_size);

    if (ret == RHINO_SUCCESS) {
        return 0;
    }

    return -EPERM;
}

k_status_t csi_kernel_msgq_get(k_msgq_handle_t mq_handle, void *msg_ptr, int64_t timeout)
{
    if (mq_handle == NULL || msg_ptr == NULL) {
        return -EINVAL;
    }

    tick_t t;

    if (timeout < 0) {
        t = RHINO_WAIT_FOREVER;
    } else {
        t = timeout;
    }

    mq_adapter_t *handle = (mq_adapter_t *)mq_handle;
    size_t size;
    kstat_t ret = krhino_buf_queue_recv(handle->buf_q, t, msg_ptr, &size);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else if (ret == RHINO_BLK_TIMEOUT) {
        return -ETIMEDOUT;
    } else {
        return -EPERM;
    }
}

int32_t csi_kernel_msgq_get_count(k_msgq_handle_t mq_handle)
{
    if (mq_handle == NULL) {
        return -EINVAL;
    }

    mq_adapter_t *handle = (mq_adapter_t *)mq_handle;
    kbuf_queue_info_t info;
    kstat_t ret = krhino_buf_queue_info_get(handle->buf_q, &info);

    if (ret == RHINO_SUCCESS) {
        int32_t cnt = info.cur_num;
        return cnt;
    } else {
        return -EPERM;
    }
}

uint32_t csi_kernel_msgq_get_capacity(k_msgq_handle_t mq_handle)
{
    if (mq_handle == NULL) {
        return 0;
    }

    mq_adapter_t *handle = (mq_adapter_t *)mq_handle;
    return handle->msg_count;
}

uint32_t csi_kernel_msgq_get_msg_size(k_msgq_handle_t mq_handle)
{
    if (mq_handle == NULL) {
        return 0;
    }

    mq_adapter_t *handle = (mq_adapter_t *)mq_handle;
    return handle->buf_q->max_msg_size;
}

k_status_t csi_kernel_msgq_flush(k_msgq_handle_t mq_handle)
{
    if (mq_handle == NULL) {
        return -EINVAL;
    }

    mq_adapter_t *handle = (mq_adapter_t *)mq_handle;
    kstat_t ret = krhino_buf_queue_flush(handle->buf_q);

    if (ret == RHINO_SUCCESS) {
        return 0;
    } else {
        return -EPERM;
    }
}

void *csi_kernel_malloc(size_t size, void *caller)
{
    void *ret;
    (void)caller;

    if (size < 1) {
        return NULL;
    }

    csi_kernel_sched_suspend();
    ret = krhino_mm_alloc(size);
    csi_kernel_sched_resume(0);
    return ret;
}

void csi_kernel_free(void *ptr, void *caller)
{
    (void)caller;
    csi_kernel_sched_suspend();
    krhino_mm_free(ptr);
    csi_kernel_sched_resume(0);
}

void *csi_kernel_realloc(void *ptr, size_t size, void *caller)
{
    void *new_ptr;
    (void)caller;

    new_ptr = krhino_mm_realloc(ptr, size);

    return new_ptr;
}

k_status_t csi_kernel_get_mminfo(int32_t *total, int32_t *used, int32_t *free, int32_t *peak)
{
    *total = g_kmm_head->used_size + g_kmm_head->free_size;
    *used = g_kmm_head->used_size;
    *free = g_kmm_head->free_size;
    *peak = g_kmm_head->maxused_size;

    return 0;
}

k_status_t csi_kernel_mm_dump(void)
{
#if (RHINO_CONFIG_MM_DEBUG > 0u)
    dumpsys_mm_info_func(KMM_ERROR_UNLOCKED);
#endif

    return 0;
}