1 /*
2  * Copyright (C) 2015-2017 Alibaba Group Holding Limited
3  */
4 
5 #include <stdio.h>
6 
7 #include "k_api.h"
8 
9 #if (RHINO_CONFIG_KOBJ_LIST > 0)
kobj_list_init(void)10 void kobj_list_init(void)
11 {
12     klist_init(&(g_kobj_list.task_head));
13     klist_init(&(g_kobj_list.mutex_head));
14 
15 #if (RHINO_CONFIG_SEM > 0)
16     klist_init(&(g_kobj_list.sem_head));
17 #endif
18 
19 #if (RHINO_CONFIG_QUEUE > 0)
20     klist_init(&(g_kobj_list.queue_head));
21 #endif
22 
23 #if (RHINO_CONFIG_BUF_QUEUE > 0)
24     klist_init(&(g_kobj_list.buf_queue_head));
25 #endif
26 
27 #if (RHINO_CONFIG_EVENT_FLAG > 0)
28     klist_init(&(g_kobj_list.event_head));
29 #endif
30 }
31 #endif
32 
33 #if (RHINO_CONFIG_TASK_STACK_OVF_CHECK > 0)
34 #if (RHINO_CONFIG_CPU_STACK_DOWN > 0)
krhino_stack_ovf_check(void)35 void krhino_stack_ovf_check(void)
36 {
37     ktask_t     *cur;
38     cpu_stack_t *stack_start;
39     uint8_t      i;
40 
41     cur = g_active_task[cpu_cur_get()];
42     stack_start = cur->task_stack_base;
43 
44     for (i = 0; i < RHINO_CONFIG_STK_CHK_WORDS; i++) {
45         if (*stack_start++ != RHINO_TASK_STACK_OVF_MAGIC) {
46             k_err_proc(RHINO_TASK_STACK_OVF);
47         }
48     }
49 
50     if ((cpu_stack_t *)(cur->task_stack) < stack_start) {
51         k_err_proc(RHINO_TASK_STACK_OVF);
52     }
53 
54 #if (RHINO_CONFIG_USER_SPACE > 0)
55     if (cur->pid == 0) {
56         return;
57     }
58 
59     stack_start = cur->task_ustack_base;
60 
61     for (i = 0; i < RHINO_CONFIG_STK_CHK_WORDS; i++) {
62         if (*stack_start++ != RHINO_TASK_STACK_OVF_MAGIC) {
63             k_err_proc(RHINO_TASK_STACK_OVF);
64         }
65     }
66 
67     if ((cpu_stack_t *)(cur->task_ustack) < stack_start) {
68         k_err_proc(RHINO_TASK_STACK_OVF);
69     }
70 #endif
71 }
72 
73 #else
74 
krhino_stack_ovf_check(void)75 void krhino_stack_ovf_check(void)
76 {
77     ktask_t     *cur;
78     cpu_stack_t *stack_start;
79     cpu_stack_t *stack_end;
80     uint8_t      i;
81 
82     cur = g_active_task[cpu_cur_get()];
83 
84     stack_start = cur->task_stack_base;
85     stack_end   = stack_start + cur->stack_size - RHINO_CONFIG_STK_CHK_WORDS;
86 
87     for (i = 0; i < RHINO_CONFIG_STK_CHK_WORDS; i++) {
88         if (*stack_end++ != RHINO_TASK_STACK_OVF_MAGIC) {
89             k_err_proc(RHINO_TASK_STACK_OVF);
90         }
91     }
92 
93     if ((cpu_stack_t *)(cur->task_stack) > stack_end) {
94         k_err_proc(RHINO_TASK_STACK_OVF);
95     }
96 
97 #if (RHINO_CONFIG_USER_SPACE > 0)
98     if (cur->pid == 0) {
99         return;
100     }
101 
102     stack_start = cur->task_ustack_base;
103     stack_end   = stack_start + cur->ustack_size - RHINO_CONFIG_STK_CHK_WORDS;
104 
105     for (i = 0; i < RHINO_CONFIG_STK_CHK_WORDS; i++) {
106         if (*stack_end++ != RHINO_TASK_STACK_OVF_MAGIC) {
107             k_err_proc(RHINO_TASK_STACK_OVF);
108         }
109     }
110 
111     if ((cpu_stack_t *)(cur->task_ustack) > stack_end) {
112         k_err_proc(RHINO_TASK_STACK_OVF);
113     }
114 #endif
115 }
116 #endif
117 #endif
118 
119 #if (RHINO_CONFIG_SYS_STATS > 0)
krhino_task_sched_stats_reset(void)120 void krhino_task_sched_stats_reset(void)
121 {
122     lr_timer_t cur_time;
123     uint32_t   i;
124 
125     g_cur_intrpt_disable_max_time = 0;
126     g_cur_sched_disable_max_time  = 0;
127 
128     /* system first task starting time should be measured otherwise not correct */
129     cur_time = LR_COUNT_GET();
130     for (i = 0; i < RHINO_CONFIG_CPU_NUM; i++) {
131         g_preferred_ready_task[i]->task_time_start = cur_time;
132     }
133 }
134 
krhino_task_sched_stats_get(void)135 void krhino_task_sched_stats_get(void)
136 {
137     lr_timer_t cur_time;
138     lr_timer_t exec_time;
139     hr_timer_t intrpt_disable_time;
140 
141     if (g_cur_intrpt_disable_max_time > g_sys_measure_waste) {
142         intrpt_disable_time = g_cur_intrpt_disable_max_time - g_sys_measure_waste;
143     } else {
144         intrpt_disable_time = 0;
145     }
146 
147     if (g_active_task[cpu_cur_get()]->task_intrpt_disable_time_max < intrpt_disable_time) {
148         g_active_task[cpu_cur_get()]->task_intrpt_disable_time_max = intrpt_disable_time;
149     }
150 
151     g_cur_intrpt_disable_max_time = 0;
152 
153 
154     if (g_active_task[cpu_cur_get()]->task_sched_disable_time_max < g_cur_sched_disable_max_time) {
155         g_active_task[cpu_cur_get()]->task_sched_disable_time_max = g_cur_sched_disable_max_time;
156     }
157 
158     g_cur_sched_disable_max_time = 0;
159 
160     /* Keep track of new task and total system context switch times */
161     g_preferred_ready_task[cpu_cur_get()]->task_ctx_switch_times++;
162     g_sys_ctx_switch_times++;
163 
164     cur_time  = LR_COUNT_GET();
165     exec_time = cur_time - g_active_task[cpu_cur_get()]->task_time_start;
166 
167     g_active_task[cpu_cur_get()]->task_time_total_run += (uint64_t)exec_time;
168     if (g_active_task[cpu_cur_get()]->task_state == K_RDY) {
169         g_active_task[cpu_cur_get()]->task_time_this_run += exec_time;
170     } else {
171         g_active_task[cpu_cur_get()]->task_time_this_run = 0u;
172     }
173     g_preferred_ready_task[cpu_cur_get()]->task_time_start = cur_time;
174 }
175 
intrpt_disable_measure_start(void)176 void intrpt_disable_measure_start(void)
177 {
178     g_intrpt_disable_times++;
179 
180     /* start measure interrupt disable time */
181     if (g_intrpt_disable_times == 1u) {
182         g_intrpt_disable_time_start = HR_COUNT_GET();
183     }
184 }
185 
intrpt_disable_measure_stop(void)186 void intrpt_disable_measure_stop(void)
187 {
188     hr_timer_t diff;
189 
190     g_intrpt_disable_times--;
191 
192     if (g_intrpt_disable_times == 0u) {
193         diff = HR_COUNT_GET() - g_intrpt_disable_time_start;
194 
195         if (g_intrpt_disable_max_time < diff) {
196             g_intrpt_disable_max_time = diff;
197         }
198 
199         if (g_cur_intrpt_disable_max_time < diff) {
200             g_cur_intrpt_disable_max_time = diff;
201         }
202     }
203 }
204 #endif
205 
206 #if (RHINO_CONFIG_HW_COUNT > 0)
krhino_overhead_measure(void)207 void krhino_overhead_measure(void)
208 {
209     hr_timer_t diff;
210     hr_timer_t m1;
211     hr_timer_t m2;
212 
213     m1 = HR_COUNT_GET();
214     m2 = HR_COUNT_GET();
215 
216     diff = m2 - m1;
217 
218     /* measure time overhead */
219     g_sys_measure_waste = diff;
220 }
221 #endif
222 
/* The CPU usage calculation below runs in the dedicated "cpu_stats" task. */
224 #if (RHINO_CONFIG_CPU_USAGE_STATS > 0)
cpu_usage_task_entry(void * arg)225 static void cpu_usage_task_entry(void *arg)
226 {
227     idle_count_t idle_count;
228 
229     (void)arg;
230 
231     while (1) {
232         idle_count_set(0u);
233 
234         krhino_task_sleep(RHINO_CONFIG_TICKS_PER_SECOND / 2);
235 
236         idle_count = idle_count_get();
237 
238         if (idle_count > g_idle_count_max) {
239             g_idle_count_max = idle_count;
240         }
241 
242         if (idle_count < g_idle_count_max) {
243             /* use 64bit for cpu_task_idle_count  to avoid overflow quickly */
244             g_cpu_usage = 10000 - (uint32_t)((idle_count * 10000) / g_idle_count_max);
245         } else {
246             g_cpu_usage = 10000;
247         }
248     }
249 }
250 
cpu_usage_stats_start(void)251 void cpu_usage_stats_start(void)
252 {
253     /* create a statistic task to calculate cpu usage */
254     krhino_task_create(&g_cpu_usage_task, "cpu_stats", 0,
255                        RHINO_CONFIG_CPU_USAGE_TASK_PRI,
256                        0, g_cpu_task_stack, RHINO_CONFIG_CPU_USAGE_TASK_STACK, cpu_usage_task_entry,
257                        1);
258 }
259 
krhino_get_cpu_usage(void)260 uint32_t krhino_get_cpu_usage(void)
261 {
262     return g_cpu_usage;
263 }
264 
265 #endif /* RHINO_CONFIG_CPU_USAGE_STATS */
266 
267