/*
 * Copyright (c) 2017 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel asynchronous event polling interface.
 *
 * This polling mechanism allows waiting on multiple events concurrently,
 * whether the events are triggered directly or originate from kernel
 * objects or other kernel constructs.
 */
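
/*
 * Minimal usage sketch (illustrative, not part of this file): a caller
 * initializes one k_poll_event per object of interest and blocks in
 * k_poll() until one becomes ready. `rx_queue` and the K_FOREVER timeout
 * are assumptions borrowed from the Zephyr-style API this port follows.
 *
 *   struct k_poll_event events[1];
 *
 *   k_poll_event_init(&events[0], K_POLL_TYPE_DATA_AVAILABLE,
 *                     K_POLL_MODE_NOTIFY_ONLY, &rx_queue);
 *
 *   k_poll(events, 1, K_FOREVER);
 *   if (events[0].state & K_POLL_STATE_FIFO_DATA_AVAILABLE) {
 *       // data can now be dequeued from rx_queue
 *   }
 */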

#include <stdio.h>

#include <ble_os.h>
#include <ble_types/types.h>
#include <misc/slist.h>
#include <misc/dlist.h>
#include <misc/__assert.h>

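/*
 * Per-waiter record: the event array a k_poll() caller is blocked on and
 * the semaphore used to wake it from event_callback().
 */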
struct event_cb {
	sys_dlist_t next;
	struct k_poll_event *events;
	int num_events;
	struct k_sem sem;
};

static sys_dlist_t event_cb_list = SYS_DLIST_STATIC_INIT(&event_cb_list);
static volatile int event_cb_counter = 0;
static uint8_t is_poll_init = 0;

void k_poll_event_init(struct k_poll_event *event, bt_u32_t type, int mode,
		       void *obj)
{
	__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
		 "only NOTIFY_ONLY mode is supported\n");
	__ASSERT(type < (1 << _POLL_NUM_TYPES), "invalid type\n");
	__ASSERT(obj, "must provide an object\n");

	event->poller = NULL;
	/* event->tag is left uninitialized: the user will set it if needed */
	event->type = type;
	event->state = K_POLL_STATE_NOT_READY;
	event->mode = mode;
	event->unused = 0;
	event->obj = obj;
}

static inline void set_event_state(struct k_poll_event *event, bt_u32_t state)
{
	event->poller = NULL;
	event->state |= state;
}

extern int has_tx_sem(struct k_poll_event *event);
extern void event_callback(uint8_t event_type);

static int _signal_poll_event(struct k_poll_event *event, bt_u32_t state,
			      int *must_reschedule)
{
	*must_reschedule = 0;
	if (event->type != K_POLL_TYPE_DATA_AVAILABLE || has_tx_sem(event)) {
		set_event_state(event, state);
		event_callback(K_POLL_TYPE_FIFO_DATA_AVAILABLE);
	}

	return 0;
}

void _handle_obj_poll_events(sys_dlist_t *events, bt_u32_t state)
{
	struct k_poll_event *poll_event;
	int must_reschedule;

	poll_event = (struct k_poll_event *)sys_dlist_get(events);
	if (poll_event) {
		(void)_signal_poll_event(poll_event, state, &must_reschedule);
	}
}

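/*
 * Check whether @a event's condition is already satisfied; on success,
 * store the matching K_POLL_STATE_* value in @a state and return 1.
 * For K_POLL_TYPE_DATA_RECV the underlying signal is consumed here.
 */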
/* must be called with interrupts locked */
static inline int is_condition_met(struct k_poll_event *event, bt_u32_t *state)
{
	switch (event->type) {
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (has_tx_sem(event) == 0) {
			return 0;
		}
		if (!k_queue_is_empty(event->queue)) {
			*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
			return 1;
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (event->signal->signaled) {
			*state = K_POLL_STATE_SIGNALED;
			return 1;
		}
		break;
	case K_POLL_TYPE_DATA_RECV:
		if (event->signal->signaled) {
			*state = K_POLL_STATE_DATA_RECV;
			event->signal->signaled = 0;
			return 1;
		}
		break;
	default:
		__ASSERT(0, "invalid event type (0x%x)\n", event->type);
		break;
	}

	return 0;
}

static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
			     struct _poller *poller)
{
	sys_dlist_append(events, &event->_node);
}

/* must be called with interrupts locked */
static inline int register_event(struct k_poll_event *event,
				 struct _poller *poller)
{
	switch (event->type) {
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue, "invalid queue\n");
		add_event(&event->queue->poll_events, event, poller);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	case K_POLL_TYPE_DATA_RECV:
		/* the registration list lives on the signal, so assert the
		 * signal (not the queue) before dereferencing it
		 */
		__ASSERT(event->signal, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	default:
		__ASSERT(0, "invalid event type\n");
		break;
	}

	event->poller = poller;

	return 0;
}

/* must be called with interrupts locked */
static inline void clear_event_registration(struct k_poll_event *event)
{
	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue, "invalid queue\n");
		sys_dlist_remove(&event->_node);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal, "invalid poll signal\n");
		sys_dlist_remove(&event->_node);
		break;
	case K_POLL_TYPE_DATA_RECV:
		/* registered on the signal's list; assert the signal */
		__ASSERT(event->signal, "invalid poll signal\n");
		sys_dlist_remove(&event->_node);
		break;
	default:
		__ASSERT(0, "invalid event type\n");
		break;
	}
}

/* must be called with interrupts locked */
static inline void clear_event_registrations(struct k_poll_event *events,
					     int last_registered,
					     unsigned int key)
{
	for (; last_registered >= 0; last_registered--) {
		clear_event_registration(&events[last_registered]);
		irq_unlock(key);
		key = irq_lock();
	}
}

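/*
 * Single pass over the event array: latch events whose condition is
 * already met; otherwise (and unless @a timeout is K_NO_WAIT) register
 * them so a later trigger can wake the caller. Returns true if nothing
 * was ready and the caller still needs to block.
 */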
static bool polling_events(struct k_poll_event *events, int num_events,
			   bt_s32_t timeout, int *last_registered)
{
	bool polling = true;
	unsigned int key;

	for (int ii = 0; ii < num_events; ii++) {
		bt_u32_t state;

		key = irq_lock();
		if (is_condition_met(&events[ii], &state)) {
			set_event_state(&events[ii], state);
			polling = false;
		} else if (timeout != K_NO_WAIT && polling) {
			register_event(&events[ii], NULL);
			++(*last_registered);
		}
		irq_unlock(key);
	}

	return polling;
}

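/*
 * Wake every waiter whose event set contains an event of @a event_type;
 * K_POLL_TYPE_EARLIER_WORK acts as a wildcard that wakes all waiters.
 */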
void event_callback(uint8_t event_type)
{
	sys_dnode_t *event_next;
	sys_dnode_t *event_next_save;
	unsigned int key;

	key = irq_lock();
	SYS_DLIST_FOR_EACH_NODE_SAFE(&event_cb_list, event_next, event_next_save) {
		struct event_cb *cb = (struct event_cb *)event_next;

		for (int i = 0; i < cb->num_events; i++) {
			if (cb->events[i].type == event_type ||
			    event_type == K_POLL_TYPE_EARLIER_WORK) {
				k_sem_give(&cb->sem);
				break;
			}
		}
	}
	irq_unlock(key);
}

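/*
 * Wait on @a events until one becomes ready or @a timeout expires.
 * Note: @a eventcb is static and registered once, so this implementation
 * appears to assume a single concurrent k_poll() caller.
 */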
int k_poll(struct k_poll_event *events, int num_events, bt_s32_t timeout)
{
	int last_registered = -1;
	unsigned int key;
	bool polling = true;
	static struct event_cb eventcb;

	/* find events whose condition is already fulfilled */
#if 0
	polling = polling_events(events, num_events, timeout, &last_registered);

	if (polling == false) {
		goto exit;
	}
#endif

	if (!is_poll_init) {
		k_sem_init(&eventcb.sem, 0, 1);
		sys_dlist_append(&event_cb_list, (sys_dnode_t *)&eventcb);
		event_cb_counter++;
		is_poll_init = 1;
	}

	eventcb.events = events;
	eventcb.num_events = num_events;

	polling = polling_events(events, num_events, timeout, &last_registered);

	if (polling == false) {
		goto exit;
	}

	k_sem_take(&eventcb.sem, timeout);

	last_registered = -1;
	polling_events(events, num_events, K_NO_WAIT, &last_registered);
exit:
	//sys_dlist_remove((sys_dnode_t *)&eventcb);
	//k_sem_delete(&eventcb.sem);

	key = irq_lock();
	clear_event_registrations(events, last_registered, key);
	irq_unlock(key);

	return 0;
}

void k_poll_signal_raise(struct k_poll_signal *signal, int result)
{
	signal->result = result;
	signal->signaled = 1U;
	_handle_obj_poll_events(&signal->poll_events, K_POLL_STATE_SIGNALED);
}
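
/*
 * Signal-raising sketch (illustrative): a producer raises a k_poll_signal
 * that a poller is waiting on. `conn_signal` is a hypothetical signal
 * whose poll_events list is assumed to be initialized elsewhere.
 *
 *   static struct k_poll_signal conn_signal;
 *
 *   struct k_poll_event event;
 *   k_poll_event_init(&event, K_POLL_TYPE_SIGNAL,
 *                     K_POLL_MODE_NOTIFY_ONLY, &conn_signal);
 *
 *   // producer side: mark the signal and wake the poller
 *   k_poll_signal_raise(&conn_signal, 0);
 */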