/*
 * Copyright (C) 2015-2017 Alibaba Group Holding Limited
 */

#include "k_api.h"

#if (RHINO_CONFIG_EVENT_FLAG > 0)
event_create(kevent_t * event,const name_t * name,uint32_t flags,uint8_t mm_alloc_flag)8 static kstat_t event_create(kevent_t *event, const name_t *name, uint32_t flags, uint8_t mm_alloc_flag)
9 {
10 #if (RHINO_CONFIG_KOBJ_LIST > 0)
11 CPSR_ALLOC();
12 #endif
13
14 NULL_PARA_CHK(event);
15 NULL_PARA_CHK(name);
16
17 memset(event, 0, sizeof(kevent_t));
18
19 /* init the list */
20 klist_init(&event->blk_obj.blk_list);
21 event->blk_obj.blk_policy = BLK_POLICY_PRI;
22 event->blk_obj.name = name;
23 event->flags = flags;
24 event->mm_alloc_flag = mm_alloc_flag;
25 #if (RHINO_CONFIG_TASK_DEL > 0)
26 event->blk_obj.cancel = 1u;
27 #endif
28
29 #if (RHINO_CONFIG_KOBJ_LIST > 0)
30 RHINO_CRITICAL_ENTER();
31 klist_insert(&(g_kobj_list.event_head), &event->event_item);
32 RHINO_CRITICAL_EXIT();
33 #endif
34
35 TRACE_EVENT_CREATE(krhino_cur_task_get(), event, name, flags);
36
37 event->blk_obj.obj_type = RHINO_EVENT_OBJ_TYPE;
38
39 return RHINO_SUCCESS;
40 }
41
krhino_event_create(kevent_t * event,const name_t * name,uint32_t flags)42 kstat_t krhino_event_create(kevent_t *event, const name_t *name, uint32_t flags)
43 {
44 return event_create(event, name, flags, K_OBJ_STATIC_ALLOC);
45 }
46
krhino_event_del(kevent_t * event)47 kstat_t krhino_event_del(kevent_t *event)
48 {
49 CPSR_ALLOC();
50 klist_t *blk_list_head;
51
52 NULL_PARA_CHK(event);
53
54 RHINO_CRITICAL_ENTER();
55
56 INTRPT_NESTED_LEVEL_CHK();
57
58 if (event->blk_obj.obj_type != RHINO_EVENT_OBJ_TYPE) {
59 RHINO_CRITICAL_EXIT();
60 return RHINO_KOBJ_TYPE_ERR;
61 }
62
63 if (event->mm_alloc_flag != K_OBJ_STATIC_ALLOC) {
64 RHINO_CRITICAL_EXIT();
65 return RHINO_KOBJ_DEL_ERR;
66 }
67
68 blk_list_head = &event->blk_obj.blk_list;
69
70 event->blk_obj.obj_type = RHINO_OBJ_TYPE_NONE;
71
72 while (!is_klist_empty(blk_list_head)) {
73 pend_task_rm(krhino_list_entry(blk_list_head->next, ktask_t, task_list));
74 }
75
76 event->flags = 0u;
77
78 #if (RHINO_CONFIG_KOBJ_LIST > 0)
79 klist_rm(&event->event_item);
80 #endif
81
82 TRACE_EVENT_DEL(g_active_task[cpu_cur_get()], event);
83
84 RHINO_CRITICAL_EXIT_SCHED();
85
86 return RHINO_SUCCESS;
87 }
88
89 #if (RHINO_CONFIG_KOBJ_DYN_ALLOC > 0)
krhino_event_dyn_create(kevent_t ** event,const name_t * name,uint32_t flags)90 kstat_t krhino_event_dyn_create(kevent_t **event, const name_t *name, uint32_t flags)
91 {
92 kstat_t stat;
93 kevent_t *event_obj;
94
95 if (event == NULL) {
96 return RHINO_NULL_PTR;
97 }
98
99 event_obj = krhino_mm_alloc(sizeof(kevent_t));
100
101 if (event_obj == NULL) {
102 return RHINO_NO_MEM;
103 }
104
105 stat = event_create(event_obj, name, flags, K_OBJ_DYN_ALLOC);
106
107 if (stat != RHINO_SUCCESS) {
108 krhino_mm_free(event_obj);
109 return stat;
110 }
111
112 *event = event_obj;
113
114 return stat;
115 }
116
krhino_event_dyn_del(kevent_t * event)117 kstat_t krhino_event_dyn_del(kevent_t *event)
118 {
119 CPSR_ALLOC();
120 klist_t *blk_list_head;
121
122 NULL_PARA_CHK(event);
123
124 RHINO_CRITICAL_ENTER();
125
126 INTRPT_NESTED_LEVEL_CHK();
127
128 if (event->blk_obj.obj_type != RHINO_EVENT_OBJ_TYPE) {
129 RHINO_CRITICAL_EXIT();
130 return RHINO_KOBJ_TYPE_ERR;
131 }
132
133 if (event->mm_alloc_flag != K_OBJ_DYN_ALLOC) {
134 RHINO_CRITICAL_EXIT();
135 return RHINO_KOBJ_DEL_ERR;
136 }
137
138 blk_list_head = &event->blk_obj.blk_list;
139
140 event->blk_obj.obj_type = RHINO_OBJ_TYPE_NONE;
141
142 while (!is_klist_empty(blk_list_head)) {
143 pend_task_rm(krhino_list_entry(blk_list_head->next, ktask_t, task_list));
144 }
145
146 event->flags = 0u;
147
148 #if (RHINO_CONFIG_KOBJ_LIST > 0)
149 klist_rm(&event->event_item);
150 #endif
151
152 RHINO_CRITICAL_EXIT_SCHED();
153
154 krhino_mm_free(event);
155
156 return RHINO_SUCCESS;
157 }
158 #endif
159
/*
 * Wait until the event's flag word satisfies the requested condition.
 *
 * opt selects the match rule: RHINO_AND / RHINO_AND_CLEAR require all
 * bits in 'flags' to be set; RHINO_OR / RHINO_OR_CLEAR require at least
 * one. The *_CLEAR variants additionally clear the requested bits on a
 * successful match. *actl_flags receives the flag word as it was when
 * the condition matched. ticks bounds the wait (RHINO_NO_WAIT returns
 * immediately with RHINO_NO_PEND_WAIT when the condition is not met).
 *
 * Returns RHINO_SUCCESS on match, or an error/wakeup status from
 * pend_state_end_proc() after blocking.
 */
kstat_t krhino_event_get(kevent_t *event, uint32_t flags, uint8_t opt,
                         uint32_t *actl_flags, tick_t ticks)
{
    CPSR_ALLOC();
    kstat_t stat;
    uint8_t status;
    uint8_t cur_cpu_num;

    NULL_PARA_CHK(event);
    NULL_PARA_CHK(actl_flags);

    /* reject any option outside the four supported match modes */
    if ((opt != RHINO_AND) && (opt != RHINO_OR) && (opt != RHINO_AND_CLEAR) &&
        (opt != RHINO_OR_CLEAR)) {
        return RHINO_NO_THIS_EVENT_OPT;
    }

    RHINO_CRITICAL_ENTER();

    cur_cpu_num = cpu_cur_get();
    /* bail out early if the current task is being cancelled */
    TASK_CANCEL_CHK(event);

    INTRPT_NESTED_LEVEL_CHK();

    if (event->blk_obj.obj_type != RHINO_EVENT_OBJ_TYPE) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_TYPE_ERR;
    }

    /* if option is AND MASK or OR MASK */
    if (opt & RHINO_FLAGS_AND_MASK) {
        /* AND semantics: every requested bit must be present */
        if ((event->flags & flags) == flags) {
            status = RHINO_TRUE;
        } else {
            status = RHINO_FALSE;
        }
    } else {
        /* OR semantics: any requested bit suffices */
        if ((event->flags & flags) > 0u) {
            status = RHINO_TRUE;
        } else {
            status = RHINO_FALSE;
        }
    }

    if (status == RHINO_TRUE) {
        /* report the flag word as seen at match time */
        *actl_flags = event->flags;

        /* consume the requested bits for the *_CLEAR options */
        if (opt & RHINO_FLAGS_CLEAR_MASK) {
            event->flags &= ~flags;
        }

        TRACE_EVENT_GET(g_active_task[cur_cpu_num], event);
        RHINO_CRITICAL_EXIT();

        return RHINO_SUCCESS;
    }

    /* can't get event, and return immediately if wait_option is RHINO_NO_WAIT */
    if (ticks == RHINO_NO_WAIT) {
        RHINO_CRITICAL_EXIT();
        return RHINO_NO_PEND_WAIT;
    }

    /* system is locked so task can not be blocked just return immediately */
    if (g_sched_lock[cur_cpu_num] > 0u) {
        RHINO_CRITICAL_EXIT();
        return RHINO_SCHED_DISABLE;
    }

    /* remember the passed information so event_set() can evaluate and
       complete this request when flags change */
    g_active_task[cur_cpu_num]->pend_option = opt;
    g_active_task[cur_cpu_num]->pend_flags  = flags;
    g_active_task[cur_cpu_num]->pend_info   = actl_flags;

    pend_to_blk_obj(&event->blk_obj, g_active_task[cur_cpu_num], ticks);

    TRACE_EVENT_GET_BLK(g_active_task[cur_cpu_num], event, ticks);

    RHINO_CRITICAL_EXIT_SCHED();

    RHINO_CPU_INTRPT_DISABLE();

    /* so the task is waked up, need know which reason cause wake up;
       re-read the cpu id because the task may have migrated while blocked */
    stat = pend_state_end_proc(g_active_task[cpu_cur_get()], &event->blk_obj);

    RHINO_CPU_INTRPT_ENABLE();

    return stat;
}
/*
 * Update the event's flag word and wake any tasks whose wait condition
 * becomes satisfied.
 *
 * With RHINO_AND (AND-mask) semantics the flags are cleared
 * (flags &= mask) and no task is woken. With RHINO_OR semantics the
 * flags are set (flags |= mask) and the pend queue is scanned: each
 * blocked task's stored pend_option/pend_flags are evaluated against
 * the new flag word, matching tasks are woken and, for *_CLEAR waiters,
 * their requested bits are consumed from the event.
 */
static kstat_t event_set(kevent_t *event, uint32_t flags, uint8_t opt)
{
    CPSR_ALLOC();
    klist_t *iter;
    klist_t *event_head;
    klist_t *iter_temp;
    ktask_t *task;
    uint8_t  status;
    uint32_t cur_event_flags;

    status = RHINO_FALSE;

    RHINO_CRITICAL_ENTER();

    if (event->blk_obj.obj_type != RHINO_EVENT_OBJ_TYPE) {
        RHINO_CRITICAL_EXIT();
        return RHINO_KOBJ_TYPE_ERR;
    }

    event_head = &event->blk_obj.blk_list;

    /* if the set_option is AND_MASK, it just clears the flags and will return immediately */
    if (opt & RHINO_FLAGS_AND_MASK) {
        event->flags &= flags;
        RHINO_CRITICAL_EXIT();
        return RHINO_SUCCESS;
    } else {
        event->flags |= flags;
    }

    /* snapshot used to evaluate every waiter against the same value */
    cur_event_flags = event->flags;

    iter = event_head->next;

    /* if list is not empty */
    while (iter != event_head) {
        task = krhino_list_entry(iter, ktask_t, task_list);

        /* save the successor first: waking the task unlinks this node */
        iter_temp = iter->next;

        if (task->pend_option & RHINO_FLAGS_AND_MASK) {
            /* AND waiter: all of its requested bits must be set */
            if ((cur_event_flags & task->pend_flags) == task->pend_flags) {
                status = RHINO_TRUE;
            } else {
                status = RHINO_FALSE;
            }
        } else {
            /* OR waiter: any of its requested bits suffices */
            if (cur_event_flags & task->pend_flags) {
                status = RHINO_TRUE;
            } else {
                status = RHINO_FALSE;
            }
        }

        if (status == RHINO_TRUE) {
            /* hand the matched flag word back through the waiter's
               actl_flags pointer stored in pend_info */
            (*(uint32_t *)(task->pend_info)) = cur_event_flags;

            /* the task condition is met, just wake this task */
            pend_task_wakeup(task);

            TRACE_EVENT_TASK_WAKE(g_active_task[cpu_cur_get()], task, event);

            /* does it need to clear the flags */
            if (task->pend_option & RHINO_FLAGS_CLEAR_MASK) {
                event->flags &= ~(task->pend_flags);
            }
        }

        iter = iter_temp;
    }

    /* woken tasks may preempt, so exit with a reschedule */
    RHINO_CRITICAL_EXIT_SCHED();

    return RHINO_SUCCESS;
}
krhino_event_set(kevent_t * event,uint32_t flags,uint8_t opt)325 kstat_t krhino_event_set(kevent_t *event, uint32_t flags, uint8_t opt)
326 {
327 NULL_PARA_CHK(event);
328
329 if ((opt != RHINO_AND) && (opt != RHINO_OR)) {
330 return RHINO_NO_THIS_EVENT_OPT;
331 }
332
333 return event_set(event, flags, opt);
334 }
#endif /* RHINO_CONFIG_EVENT_FLAG */