/*
 * Copyright (c) 2017 Wind River Systems, Inc.
 * Copyright (c) 2023 Arm Limited (or its affiliates). All rights reserved.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 *
 * @brief Kernel asynchronous event polling interface.
 *
 * This polling mechanism allows waiting on multiple events concurrently,
 * whether they are triggered directly or originate from kernel objects
 * or other kernel constructs.
 */
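
/*
 * Illustrative usage sketch (simplified; my_sem is a user-defined
 * semaphore, see the k_poll() documentation for the full contract):
 *
 *     struct k_poll_event events[1] = {
 *         K_POLL_EVENT_STATIC_INITIALIZER(K_POLL_TYPE_SEM_AVAILABLE,
 *                                         K_POLL_MODE_NOTIFY_ONLY,
 *                                         &my_sem, 0),
 *     };
 *
 *     rc = k_poll(events, ARRAY_SIZE(events), K_FOREVER);
 *     if (rc == 0 && (events[0].state & K_POLL_STATE_SEM_AVAILABLE)) {
 *         k_sem_take(&my_sem, K_NO_WAIT);
 *     }
 *     events[0].state = K_POLL_STATE_NOT_READY;
 *
 * Polling only reports that the semaphore is available; the caller still
 * has to take it. Event states must be reset to K_POLL_STATE_NOT_READY
 * before the events are polled again.
 */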

#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <kernel_internal.h>
#include <wait_q.h>
#include <ksched.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/sys/dlist.h>
#include <zephyr/sys/util.h>
#include <zephyr/sys/__assert.h>
#include <stdbool.h>

/* Single subsystem lock.  Locking per-event would be better on highly
 * contended SMP systems, but the original locking scheme here is
 * subtle (it relies on releasing/reacquiring the lock in areas for
 * latency control and it's sometimes hard to see exactly what data is
 * "inside" a given critical section).  Do the synchronization port
 * later as an optimization.
 */
static struct k_spinlock lock;

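/*
 * A poller can be in one of three modes: MODE_POLL for threads pending
 * in k_poll(), MODE_TRIGGERED for k_work_poll items that submit work to
 * a workqueue when one of their events fires, and MODE_NONE when event
 * delivery should not take any action (e.g. while a k_work_poll item is
 * still being registered).
 */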
enum POLL_MODE { MODE_NONE, MODE_POLL, MODE_TRIGGERED };

static int signal_poller(struct k_poll_event *event, uint32_t state);
static int signal_triggered_work(struct k_poll_event *event, uint32_t status);

void k_poll_event_init(struct k_poll_event *event, uint32_t type,
		       int mode, void *obj)
{
	__ASSERT(mode == K_POLL_MODE_NOTIFY_ONLY,
		 "only NOTIFY_ONLY mode is supported\n");
	__ASSERT(type < (BIT(_POLL_NUM_TYPES)), "invalid type\n");
	__ASSERT(obj != NULL, "must provide an object\n");

	event->poller = NULL;
	/* event->tag is left uninitialized: the user will set it if needed */
	event->type = type;
	event->state = K_POLL_STATE_NOT_READY;
	event->mode = mode;
	event->unused = 0U;
	event->obj = obj;

	SYS_PORT_TRACING_FUNC(k_poll_api, event_init, event);
}

/* must be called with interrupts locked */
static inline bool is_condition_met(struct k_poll_event *event, uint32_t *state)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		if (k_sem_count_get(event->sem) > 0U) {
			*state = K_POLL_STATE_SEM_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		if (!k_queue_is_empty(event->queue)) {
			*state = K_POLL_STATE_FIFO_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_SIGNAL:
		if (event->signal->signaled != 0U) {
			*state = K_POLL_STATE_SIGNALED;
			return true;
		}
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		if (event->msgq->used_msgs > 0) {
			*state = K_POLL_STATE_MSGQ_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
#ifdef CONFIG_PIPES
		if (event->pipe->bytes_used != 0) {
#else
		if (!ring_buf_is_empty(&event->pipe->buf)) {
#endif
			*state = K_POLL_STATE_PIPE_DATA_AVAILABLE;
			return true;
		}
		break;
	case K_POLL_TYPE_IGNORE:
		break;
	default:
		__ASSERT(false, "invalid event type (0x%x)\n", event->type);
		break;
	}

	return false;
}

static struct k_thread *poller_thread(struct z_poller *p)
{
	return p ? CONTAINER_OF(p, struct k_thread, poller) : NULL;
}

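/*
 * Insert an event into an object's poll_events list, keeping the list
 * sorted by the priority of the polling threads (highest priority first)
 * so that the most important waiter is signaled first when the object
 * becomes ready.
 */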
static inline void add_event(sys_dlist_t *events, struct k_poll_event *event,
			     struct z_poller *poller)
{
	struct k_poll_event *pending;

	pending = (struct k_poll_event *)sys_dlist_peek_tail(events);
	if ((pending == NULL) ||
		(z_sched_prio_cmp(poller_thread(pending->poller),
				  poller_thread(poller)) > 0)) {
		sys_dlist_append(events, &event->_node);
		return;
	}

	SYS_DLIST_FOR_EACH_CONTAINER(events, pending, _node) {
		if (z_sched_prio_cmp(poller_thread(poller),
					poller_thread(pending->poller)) > 0) {
			sys_dlist_insert(&pending->_node, &event->_node);
			return;
		}
	}

	sys_dlist_append(events, &event->_node);
}

/* must be called with interrupts locked */
static inline void register_event(struct k_poll_event *event,
				 struct z_poller *poller)
{
	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		add_event(&event->sem->poll_events, event, poller);
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		add_event(&event->queue->poll_events, event, poller);
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		add_event(&event->signal->poll_events, event, poller);
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		add_event(&event->msgq->poll_events, event, poller);
		break;
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		__ASSERT(event->pipe != NULL, "invalid pipe\n");
		add_event(&event->pipe->poll_events, event, poller);
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}

	event->poller = poller;
}

/* must be called with interrupts locked */
static inline void clear_event_registration(struct k_poll_event *event)
{
	bool remove_event = false;

	event->poller = NULL;

	switch (event->type) {
	case K_POLL_TYPE_SEM_AVAILABLE:
		__ASSERT(event->sem != NULL, "invalid semaphore\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_DATA_AVAILABLE:
		__ASSERT(event->queue != NULL, "invalid queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_SIGNAL:
		__ASSERT(event->signal != NULL, "invalid poll signal\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
		__ASSERT(event->msgq != NULL, "invalid message queue\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
		__ASSERT(event->pipe != NULL, "invalid pipe\n");
		remove_event = true;
		break;
	case K_POLL_TYPE_IGNORE:
		/* nothing to do */
		break;
	default:
		__ASSERT(false, "invalid event type\n");
		break;
	}
	if (remove_event && sys_dnode_is_linked(&event->_node)) {
		sys_dlist_remove(&event->_node);
	}
}

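/*
 * Clear registrations for an array of events. The poll lock is dropped
 * and re-acquired between entries so that clearing a long array does not
 * keep other CPUs or interrupts locked out for the whole duration.
 */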
/* must be called with interrupts locked */
static inline void clear_event_registrations(struct k_poll_event *events,
					      int num_events,
					      k_spinlock_key_t key)
{
	while (num_events--) {
		clear_event_registration(&events[num_events]);
		k_spin_unlock(&lock, key);
		key = k_spin_lock(&lock);
	}
}

static inline void set_event_ready(struct k_poll_event *event, uint32_t state)
{
	event->poller = NULL;
	event->state |= state;
}

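/*
 * Walk the event array once: events whose condition is already met are
 * marked ready on the spot; otherwise, unless the caller only wants a
 * snapshot (just_check) or polling already stopped, the event is
 * registered on its object so a later state change can wake the poller.
 * Returns the number of events actually registered.
 */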
static inline int register_events(struct k_poll_event *events,
				  int num_events,
				  struct z_poller *poller,
				  bool just_check)
{
	int events_registered = 0;

	for (int ii = 0; ii < num_events; ii++) {
		k_spinlock_key_t key;
		uint32_t state;

		key = k_spin_lock(&lock);
		if (is_condition_met(&events[ii], &state)) {
			set_event_ready(&events[ii], state);
			poller->is_polling = false;
		} else if (!just_check && poller->is_polling) {
			register_event(&events[ii], poller);
			events_registered += 1;
		} else {
			/* The event condition is not met and we are either
			 * only checking (just_check) or the poller already
			 * stopped polling; nothing to register.
			 */
			;
		}
		k_spin_unlock(&lock, key);
	}

	return events_registered;
}

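/*
 * Wake up a thread pending in k_poll() because one of its registered
 * events fired: unpend it, set its swap return value (-EINTR if the
 * event was cancelled, 0 otherwise) and make it ready to run.
 */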
static int signal_poller(struct k_poll_event *event, uint32_t state)
{
	struct k_thread *thread = poller_thread(event->poller);

	__ASSERT(thread != NULL, "poller should have a thread\n");

	if (!z_is_thread_pending(thread)) {
		return 0;
	}

	z_unpend_thread(thread);
	arch_thread_return_value_set(thread,
		state == K_POLL_STATE_CANCELLED ? -EINTR : 0);

	if (!z_is_thread_ready(thread)) {
		return 0;
	}

	z_ready_thread(thread);

	return 0;
}

int z_impl_k_poll(struct k_poll_event *events, int num_events,
		  k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;
	struct z_poller *poller = &_current->poller;

	poller->is_polling = true;
	poller->mode = MODE_POLL;

	__ASSERT(!arch_is_in_isr(), "");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_poll_api, poll, events);

	events_registered = register_events(events, num_events, poller,
					    K_TIMEOUT_EQ(timeout, K_NO_WAIT));

	key = k_spin_lock(&lock);

	/*
	 * If we're not polling anymore, it means that at least one event
	 * condition is met, either when looping through the events here or
	 * because one of the events registered has had its state changed.
	 */
	if (!poller->is_polling) {
		clear_event_registrations(events, events_registered, key);
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, 0);

		return 0;
	}

	poller->is_polling = false;

	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, -EAGAIN);

		return -EAGAIN;
	}

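	/*
	 * All pollers pend on this single, function-local wait queue: they
	 * are woken individually by signal_poller() (or time out), never
	 * through the wait queue itself, so sharing it is safe.
	 */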
	static _wait_q_t wait_q = Z_WAIT_Q_INIT(&wait_q);

	int swap_rc = z_pend_curr(&lock, key, &wait_q, timeout);

	/*
	 * Clear all event registrations. If events happen while we're in this
	 * loop, and we already had one that triggered, that's OK: they will
	 * end up in the list of events that are ready; if we timed out, and
	 * events happen while we're in this loop, that is OK as well since
	 * we already know the return code (-EAGAIN), and even if they are
	 * added to the list of events that occurred, the user has to check the
	 * return code first, which invalidates the whole list of event states.
	 */
	key = k_spin_lock(&lock);
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_poll_api, poll, events, swap_rc);

	return swap_rc;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll(struct k_poll_event *events,
				int num_events, k_timeout_t timeout)
{
	int ret;
	k_spinlock_key_t key;
	struct k_poll_event *events_copy = NULL;
	uint32_t bounds;

	/* Validate the events buffer and make a copy of it in an
	 * allocated kernel-side buffer.
	 */
	if (K_SYSCALL_VERIFY(num_events >= 0)) {
		ret = -EINVAL;
		goto out;
	}
	if (K_SYSCALL_VERIFY_MSG(!u32_mul_overflow(num_events,
						   sizeof(struct k_poll_event),
						   &bounds),
				 "num_events too large")) {
		ret = -EINVAL;
		goto out;
	}
	events_copy = z_thread_malloc(bounds);
	if (!events_copy) {
		ret = -ENOMEM;
		goto out;
	}

	key = k_spin_lock(&lock);
	if (K_SYSCALL_MEMORY_WRITE(events, bounds)) {
		k_spin_unlock(&lock, key);
		goto oops_free;
	}
	(void)memcpy(events_copy, events, bounds);
	k_spin_unlock(&lock, key);

	/* Validate what's inside events_copy */
	for (int i = 0; i < num_events; i++) {
		struct k_poll_event *e = &events_copy[i];

		if (K_SYSCALL_VERIFY(e->mode == K_POLL_MODE_NOTIFY_ONLY)) {
			ret = -EINVAL;
			goto out_free;
		}

		switch (e->type) {
		case K_POLL_TYPE_IGNORE:
			break;
		case K_POLL_TYPE_SIGNAL:
			K_OOPS(K_SYSCALL_OBJ(e->signal, K_OBJ_POLL_SIGNAL));
			break;
		case K_POLL_TYPE_SEM_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->sem, K_OBJ_SEM));
			break;
		case K_POLL_TYPE_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->queue, K_OBJ_QUEUE));
			break;
		case K_POLL_TYPE_MSGQ_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->msgq, K_OBJ_MSGQ));
			break;
		case K_POLL_TYPE_PIPE_DATA_AVAILABLE:
			K_OOPS(K_SYSCALL_OBJ(e->pipe, K_OBJ_PIPE));
			break;
		default:
			ret = -EINVAL;
			goto out_free;
		}
	}

	ret = k_poll(events_copy, num_events, timeout);
	(void)memcpy((void *)events, events_copy, bounds);
out_free:
	k_free(events_copy);
out:
	return ret;
oops_free:
	k_free(events_copy);
	K_OOPS(1);
}
#include <zephyr/syscalls/k_poll_mrsh.c>
#endif /* CONFIG_USERSPACE */

/* must be called with interrupts locked */
static int signal_poll_event(struct k_poll_event *event, uint32_t state)
{
	struct z_poller *poller = event->poller;
	int retcode = 0;

	if (poller != NULL) {
		if (poller->mode == MODE_POLL) {
			retcode = signal_poller(event, state);
		} else if (poller->mode == MODE_TRIGGERED) {
			retcode = signal_triggered_work(event, state);
		} else {
			/* Poller is in neither poll nor triggered mode; no
			 * action needed.
			 */
			;
		}

		poller->is_polling = false;

		if (retcode < 0) {
			return retcode;
		}
	}

	set_event_ready(event, state);
	return retcode;
}

bool z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state)
{
	struct k_poll_event *poll_event;
	k_spinlock_key_t key = k_spin_lock(&lock);

	poll_event = (struct k_poll_event *)sys_dlist_get(events);
	if (poll_event != NULL) {
		(void) signal_poll_event(poll_event, state);
	}

	k_spin_unlock(&lock, key);

	return (poll_event != NULL);
}

void z_impl_k_poll_signal_init(struct k_poll_signal *sig)
{
	sys_dlist_init(&sig->poll_events);
	sig->signaled = 0U;
	/* signal->result is left uninitialized */
	k_object_init(sig);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_init, sig);
}

#ifdef CONFIG_USERSPACE
static inline void z_vrfy_k_poll_signal_init(struct k_poll_signal *sig)
{
	K_OOPS(K_SYSCALL_OBJ_INIT(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_init(sig);
}
#include <zephyr/syscalls/k_poll_signal_init_mrsh.c>
#endif /* CONFIG_USERSPACE */

void z_impl_k_poll_signal_reset(struct k_poll_signal *sig)
{
	sig->signaled = 0U;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_reset, sig);
}

void z_impl_k_poll_signal_check(struct k_poll_signal *sig,
			       unsigned int *signaled, int *result)
{
	*signaled = sig->signaled;
	*result = sig->result;

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_check, sig);
}

#ifdef CONFIG_USERSPACE
void z_vrfy_k_poll_signal_check(struct k_poll_signal *sig,
			       unsigned int *signaled, int *result)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(signaled, sizeof(unsigned int)));
	K_OOPS(K_SYSCALL_MEMORY_WRITE(result, sizeof(int)));
	z_impl_k_poll_signal_check(sig, signaled, result);
}
#include <zephyr/syscalls/k_poll_signal_check_mrsh.c>
#endif /* CONFIG_USERSPACE */

int z_impl_k_poll_signal_raise(struct k_poll_signal *sig, int result)
{
	k_spinlock_key_t key = k_spin_lock(&lock);
	struct k_poll_event *poll_event;

	sig->result = result;
	sig->signaled = 1U;

	poll_event = (struct k_poll_event *)sys_dlist_get(&sig->poll_events);
	if (poll_event == NULL) {
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, 0);

		return 0;
	}

	int rc = signal_poll_event(poll_event, K_POLL_STATE_SIGNALED);

	SYS_PORT_TRACING_FUNC(k_poll_api, signal_raise, sig, rc);

	z_reschedule(&lock, key);
	return rc;
}

#ifdef CONFIG_USERSPACE
static inline int z_vrfy_k_poll_signal_raise(struct k_poll_signal *sig,
					     int result)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	return z_impl_k_poll_signal_raise(sig, result);
}
#include <zephyr/syscalls/k_poll_signal_raise_mrsh.c>

static inline void z_vrfy_k_poll_signal_reset(struct k_poll_signal *sig)
{
	K_OOPS(K_SYSCALL_OBJ(sig, K_OBJ_POLL_SIGNAL));
	z_impl_k_poll_signal_reset(sig);
}
#include <zephyr/syscalls/k_poll_signal_reset_mrsh.c>

#endif /* CONFIG_USERSPACE */

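/*
 * Triggered work: k_work_poll_submit_to_queue() registers the events
 * with a MODE_TRIGGERED poller. When one of the events fires (or the
 * timeout expires), the work item is submitted to the target workqueue;
 * this wrapper handler then clears any remaining event registrations
 * before invoking the user-supplied handler.
 */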
static void triggered_work_handler(struct k_work *work)
{
	struct k_work_poll *twork =
			CONTAINER_OF(work, struct k_work_poll, work);

	/*
	 * If the poller mode was left at MODE_NONE,
	 * k_work_poll_submit_to_queue() already cleared the event
	 * registrations.
	 */
	if (twork->poller.mode != MODE_NONE) {
		k_spinlock_key_t key;

		key = k_spin_lock(&lock);
		clear_event_registrations(twork->events,
					  twork->num_events, key);
		k_spin_unlock(&lock, key);
	}

	/* Drop work ownership and execute real handler. */
	twork->workq = NULL;
	twork->real_handler(work);
}

static void triggered_work_expiration_handler(struct _timeout *timeout)
{
	struct k_work_poll *twork =
		CONTAINER_OF(timeout, struct k_work_poll, timeout);

	twork->poller.is_polling = false;
	twork->poll_result = -EAGAIN;
	k_work_submit_to_queue(twork->workq, &twork->work);
}

extern int z_work_submit_to_queue(struct k_work_q *queue,
			 struct k_work *work);

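/*
 * Called with the poll lock held when a watched event fires for a
 * triggered work item: abort the timeout and submit the work to its
 * workqueue.
 */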
static int signal_triggered_work(struct k_poll_event *event, uint32_t status)
{
	struct z_poller *poller = event->poller;
	struct k_work_poll *twork =
		CONTAINER_OF(poller, struct k_work_poll, poller);

	if (poller->is_polling && twork->workq != NULL) {
		struct k_work_q *work_q = twork->workq;

		z_abort_timeout(&twork->timeout);
		twork->poll_result = 0;
		z_work_submit_to_queue(work_q, &twork->work);
	}

	return 0;
}

static int triggered_work_cancel(struct k_work_poll *work,
				 k_spinlock_key_t key)
{
	/* Check if the work is waiting for an event. */
	if (work->poller.is_polling && work->poller.mode != MODE_NONE) {
		/* Remove the timeout associated with the work. */
		z_abort_timeout(&work->timeout);

		/*
		 * Prevent work execution if an event arrives while we are
		 * clearing the registrations.
		 */
		work->poller.mode = MODE_NONE;

		/* Clear registrations and work ownership. */
		clear_event_registrations(work->events, work->num_events, key);
		work->workq = NULL;
		return 0;
	}

	/*
	 * If we reached here, the work is either still being registered by
	 * k_work_poll_submit_to_queue(), is already being executed, or is
	 * pending. Only in the last case do we have a chance to cancel it,
	 * but unfortunately there is no public API performing this task.
	 */

	return -EINVAL;
}

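/*
 * Illustrative usage sketch (simplified; my_work, my_handler and events
 * are user-defined):
 *
 *     k_work_poll_init(&my_work, my_handler);
 *     k_work_poll_submit(&my_work, events, ARRAY_SIZE(events), K_FOREVER);
 *
 * my_handler() then runs in the system workqueue once one of the events
 * is ready (poll_result == 0) or the timeout expires (poll_result ==
 * -EAGAIN).
 */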
void k_work_poll_init(struct k_work_poll *work,
		      k_work_handler_t handler)
{
	SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_work_poll, init, work);

	*work = (struct k_work_poll) {};
	k_work_init(&work->work, triggered_work_handler);
	work->real_handler = handler;
	z_init_timeout(&work->timeout);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_work_poll, init, work);
}

int k_work_poll_submit_to_queue(struct k_work_q *work_q,
				struct k_work_poll *work,
				struct k_poll_event *events,
				int num_events,
				k_timeout_t timeout)
{
	int events_registered;
	k_spinlock_key_t key;

	__ASSERT(work_q != NULL, "NULL work_q\n");
	__ASSERT(work != NULL, "NULL work\n");
	__ASSERT(events != NULL, "NULL events\n");
	__ASSERT(num_events >= 0, "<0 events\n");

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit_to_queue, work_q, work, timeout);

	/* Take ownership of the work if it is possible. */
	key = k_spin_lock(&lock);
	if (work->workq != NULL) {
		if (work->workq == work_q) {
			int retval;

			retval = triggered_work_cancel(work, key);
			if (retval < 0) {
				k_spin_unlock(&lock, key);

				SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
					work, timeout, retval);

				return retval;
			}
		} else {
			k_spin_unlock(&lock, key);

			SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q,
				work, timeout, -EADDRINUSE);

			return -EADDRINUSE;
		}
	}

	work->poller.is_polling = true;
	work->workq = work_q;
	work->poller.mode = MODE_NONE;
	k_spin_unlock(&lock, key);

	/* Save list of events. */
	work->events = events;
	work->num_events = num_events;

	/* Clear result */
	work->poll_result = -EINPROGRESS;

	/* Register events */
	events_registered = register_events(events, num_events,
					    &work->poller, false);

	key = k_spin_lock(&lock);
	if (work->poller.is_polling && !K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		/*
		 * Poller is still polling.
		 * No event is ready and all are watched.
		 */
		__ASSERT(num_events == events_registered,
			 "Some events were not registered!\n");

		/* Setup timeout if such action is requested */
		if (!K_TIMEOUT_EQ(timeout, K_FOREVER)) {
			z_add_timeout(&work->timeout,
				      triggered_work_expiration_handler,
				      timeout);
		}

		/* From now on, any event will result in submitted work. */
		work->poller.mode = MODE_TRIGGERED;
		k_spin_unlock(&lock, key);

		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

		return 0;
	}

	/*
	 * Either the K_NO_WAIT timeout was specified, or at least one event
	 * was ready at registration time or changed state since
	 * registration. Because the poller mode was still MODE_NONE, no
	 * event could have submitted the work to the workqueue yet.
	 */

	/*
	 * If the poller is still polling, no watched event occurred. This
	 * means we reached here due to the K_NO_WAIT timeout "expiring".
	 */
	if (work->poller.is_polling) {
		work->poller.is_polling = false;
		work->poll_result = -EAGAIN;
	} else {
		work->poll_result = 0;
	}

	/* Clear registrations. */
	clear_event_registrations(events, events_registered, key);
	k_spin_unlock(&lock, key);

	/* Submit work. */
	k_work_submit_to_queue(work_q, &work->work);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit_to_queue, work_q, work, timeout, 0);

	return 0;
}

int k_work_poll_submit(struct k_work_poll *work,
				     struct k_poll_event *events,
				     int num_events,
				     k_timeout_t timeout)
{
	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, submit, work, timeout);

	int ret = k_work_poll_submit_to_queue(&k_sys_work_q, work,
								events, num_events, timeout);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, submit, work, timeout, ret);

	return ret;
}

int k_work_poll_cancel(struct k_work_poll *work)
{
	k_spinlock_key_t key;
	int retval;

	SYS_PORT_TRACING_FUNC_ENTER(k_work_poll, cancel, work);

	/* Check if the work was submitted. */
	if (work == NULL || work->workq == NULL) {
		SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, -EINVAL);

		return -EINVAL;
	}

	key = k_spin_lock(&lock);
	retval = triggered_work_cancel(work, key);
	k_spin_unlock(&lock, key);

	SYS_PORT_TRACING_FUNC_EXIT(k_work_poll, cancel, work, retval);

	return retval;
}