/*
 * Copyright (c) 2020 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/* This test covers deprecated API.  Avoid inappropriate diagnostics
 * about the use of that API.
 */
#include <zephyr/toolchain.h>
#include <zephyr/ztest.h>

#define STACK_SIZE (1024 + CONFIG_TEST_EXTRA_STACK_SIZE)
#define COOPHI_PRIORITY K_PRIO_COOP(0) /* = -4 */
/* SYSTEM_WORKQUEUE_PRIORITY = -3 */
/* ZTEST_THREAD_PRIORITY = -2 */
#define COOPLO_PRIORITY K_PRIO_COOP(3) /* = -1 */
#define PREEMPT_PRIORITY K_PRIO_PREEMPT(1) /* = 1 */

#define DELAY_MS 100
#define DELAY_TIMEOUT K_MSEC(DELAY_MS)

BUILD_ASSERT(COOPHI_PRIORITY < CONFIG_SYSTEM_WORKQUEUE_PRIORITY,
	     "COOPHI not higher priority than system workqueue");
BUILD_ASSERT(CONFIG_SYSTEM_WORKQUEUE_PRIORITY < CONFIG_ZTEST_THREAD_PRIORITY,
	     "System workqueue not higher priority than ZTEST");
BUILD_ASSERT(CONFIG_ZTEST_THREAD_PRIORITY < COOPLO_PRIORITY,
	     "ZTEST not higher priority than COOPLO");
BUILD_ASSERT(COOPLO_PRIORITY < 0,
	     "COOPLO not cooperative");

/* Given by work thread to signal completion. */
static struct k_sem sync_sem;

static bool run_flag = true;

/* Given by test thread to release a work item. */
static struct k_sem rel_sem;

/* Common work structures, to avoid dead references to stack objects
 * if a test fails.
 */
static struct k_work common_work;
static struct k_work common_work1;
static struct k_work_delayable dwork;

/* Work synchronization objects must be in cache-coherent memory,
 * which excludes stacks on some architectures.
 */
static struct k_work_sync work_sync;

static struct k_thread *main_thread;

/* We have these threads, in strictly decreasing order of priority:
 * * coophi: a high-priority cooperative work queue
 * * system: the standard system work queue
 * * ztest thread: priority for threads running tests
 * * cooplo: a low-priority cooperative work queue
 * * preempt: a preemptible work queue
 *
 * The test infrastructure records the number of times each work queue
 * executes in a counter.
 *
 * The common work handler also supports internal re-submission if
 * configured to do so.
 *
 * There are three core handlers:
 * * The basic one (counter_handler) increments the count of handler
 *   invocations by work queue thread, optionally resubmits, then
 *   releases the semaphore the test is waiting for.
 * * The blocking one (rel_handler) waits until something invokes
 *   handler_release() to allow it to complete by invoking
 *   counter_handler().  This makes a work queue busy for arbitrary
 *   periods, but requires something external to trigger the release.
 * * The delaying one (delay_handler) waits for K_MSEC(DELAY_MS) before
 *   invoking counter_handler().
 */
static atomic_t resubmits_left;
/* k_uptime_get_32() at the last invocation of the core handler. */
static uint32_t volatile last_handle_ms;

static K_THREAD_STACK_DEFINE(coophi_stack, STACK_SIZE);
static struct k_work_q coophi_queue;
static struct k_work_q not_start_queue;
static atomic_t coophi_ctr;
static inline int coophi_counter(void)
{
	return atomic_get(&coophi_ctr);
}

static K_THREAD_STACK_DEFINE(cooplo_stack, STACK_SIZE);
static struct k_thread cooplo_thread;
static struct k_work_q cooplo_queue;
static atomic_t cooplo_ctr;
static inline int cooplo_counter(void)
{
	return atomic_get(&cooplo_ctr);
}

static inline int coop_counter(struct k_work_q *wq)
{
	return (wq == &coophi_queue) ? coophi_counter()
		: (wq == &cooplo_queue) ? cooplo_counter()
		: -1;
}

static K_THREAD_STACK_DEFINE(preempt_stack, STACK_SIZE);
static struct k_work_q preempt_queue;
static atomic_t preempt_ctr;
static inline int preempt_counter(void)
{
	return atomic_get(&preempt_ctr);
}

static K_THREAD_STACK_DEFINE(invalid_test_stack, STACK_SIZE);
static struct k_work_q invalid_test_queue;

static atomic_t system_ctr;
static inline int system_counter(void)
{
	return atomic_get(&system_ctr);
}

static inline void reset_counters(void)
{
	/* If this fails the previous test didn't clean up */
	zassert_equal(k_sem_take(&sync_sem, K_NO_WAIT), -EBUSY);
	last_handle_ms = UINT32_MAX;
	atomic_set(&resubmits_left, 0);
	atomic_set(&coophi_ctr, 0);
	atomic_set(&system_ctr, 0);
	atomic_set(&cooplo_ctr, 0);
	atomic_set(&preempt_ctr, 0);
}

static void counter_handler(struct k_work *work)
{
	last_handle_ms = k_uptime_get_32();
	if (k_current_get() == coophi_queue.thread_id) {
		atomic_inc(&coophi_ctr);
	} else if (k_current_get() == k_sys_work_q.thread_id) {
		atomic_inc(&system_ctr);
	} else if (k_current_get() == cooplo_queue.thread_id) {
		atomic_inc(&cooplo_ctr);
	} else if (k_current_get() == preempt_queue.thread_id) {
		atomic_inc(&preempt_ctr);
	}
	if (atomic_dec(&resubmits_left) > 0) {
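		/* A NULL queue resubmits the item to the queue it last
		 * ran on; here that is the queue currently running it.
		 */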
		(void)k_work_submit_to_queue(NULL, work);
	} else {
		k_sem_give(&sync_sem);
	}
}

static inline void handler_release(void)
{
	k_sem_give(&rel_sem);
}

static void async_release_cb(struct k_timer *timer)
{
	handler_release();
}

static K_TIMER_DEFINE(async_releaser, async_release_cb, NULL);

static inline void async_release(void)
{
	k_timer_start(&async_releaser, K_TICKS(1), K_NO_WAIT);
}
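
/* The timer expiry function runs in system clock interrupt context,
 * so the release happens without any thread needing to be scheduled.
 */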

static void rel_handler(struct k_work *work)
{
	(void)k_sem_take(&rel_sem, K_FOREVER);
	counter_handler(work);
}

static void delay_handler(struct k_work *work)
{
	k_sleep(K_MSEC(DELAY_MS));
	counter_handler(work);
}
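
/* delay_handler() sleeps in the work queue thread, keeping the queue
 * busy for DELAY_MS; the flush and cancel tests rely on that window.
 */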

/* Check that standard initializations result in expected content. */
static void test_work_init(void)
{
	static K_WORK_DEFINE(fnstat, counter_handler);

	static struct k_work stack;

	k_work_init(&stack, counter_handler);
	zassert_mem_equal(&stack, &fnstat, sizeof(stack),
			  NULL);
}

static void test_delayable_init(void)
{
	static K_WORK_DELAYABLE_DEFINE(fnstat, counter_handler);

	static struct k_work_delayable stack;

	k_work_init_delayable(&stack, counter_handler);
	zassert_mem_equal(&stack, &fnstat, sizeof(stack),
			  NULL);
}

/* Check that submission to an unstarted queue is diagnosed. */
ZTEST(work, test_unstarted)
{
	int rc;

	k_work_init(&common_work, counter_handler);
	zassert_equal(k_work_busy_get(&common_work), 0);

	rc = k_work_submit_to_queue(&not_start_queue, &common_work);
	zassert_equal(rc, -ENODEV);
}

static void cooplo_main(void *workq_ptr, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	struct k_work_q *queue = (struct k_work_q *)workq_ptr;

	struct k_work_queue_config cfg = {
		.name = "wq.cooplo",
		.no_yield = true,
	};

	k_work_queue_run(queue, &cfg);
}
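
/* k_work_queue_run() services the queue with the calling thread and
 * returns only if the queue is stopped, so cooplo_main() effectively
 * does not return in this test.
 */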

static void test_queue_start(void)
{
	struct k_work_queue_config cfg = {
		.name = "wq.preempt",
	};
	k_work_queue_init(&preempt_queue);
	zassert_equal(preempt_queue.flags, 0);
	k_work_queue_start(&preempt_queue, preempt_stack, STACK_SIZE,
			    PREEMPT_PRIORITY, &cfg);
	zassert_equal(preempt_queue.flags, K_WORK_QUEUE_STARTED);

	if (IS_ENABLED(CONFIG_THREAD_NAME)) {
		const char *tn = k_thread_name_get(&preempt_queue.thread);

		zassert_true(tn != cfg.name);
		zassert_true(tn != NULL);
		zassert_str_equal(tn, cfg.name);
	}

	cfg.name = NULL;
	zassert_equal(invalid_test_queue.flags, 0);
	k_work_queue_start(&invalid_test_queue, invalid_test_stack, STACK_SIZE,
			    PREEMPT_PRIORITY, &cfg);
	zassert_equal(invalid_test_queue.flags, K_WORK_QUEUE_STARTED);

	if (IS_ENABLED(CONFIG_THREAD_NAME)) {
		const char *tn = k_thread_name_get(&invalid_test_queue.thread);

		zassert_true(tn != cfg.name);
		zassert_true(tn != NULL);
		zassert_str_equal(tn, "");
	}

	cfg.name = "wq.coophi";
	cfg.no_yield = true;
	k_work_queue_start(&coophi_queue, coophi_stack, STACK_SIZE,
			    COOPHI_PRIORITY, &cfg);
	zassert_equal(coophi_queue.flags,
		      K_WORK_QUEUE_STARTED | K_WORK_QUEUE_NO_YIELD, NULL);

	(void)k_thread_create(&cooplo_thread, cooplo_stack, STACK_SIZE, cooplo_main, &cooplo_queue,
			      NULL, NULL, COOPLO_PRIORITY, 0, K_FOREVER);

	k_thread_start(&cooplo_thread);

	/* Be sure the cooplo_thread has a chance to start running */
	k_msleep(1);

	zassert_equal(cooplo_queue.flags,
		      K_WORK_QUEUE_STARTED | K_WORK_QUEUE_NO_YIELD, NULL);
}

/* Check validation of submission without a destination queue. */
ZTEST(work, test_null_queue)
{
	int rc;

	k_work_init(&common_work, counter_handler);
	zassert_equal(k_work_busy_get(&common_work), 0);

	rc = k_work_submit_to_queue(NULL, &common_work);
	zassert_equal(rc, -EINVAL);
}

/* Basic single-CPU check submitting with a non-blocking handler. */
ZTEST(work_1cpu, test_1cpu_simple_queue)
{
	int rc;

	/* Reset state and use the non-blocking handler */
	reset_counters();
	k_work_init(&common_work, counter_handler);
	zassert_equal(k_work_busy_get(&common_work), 0);
	zassert_equal(k_work_is_pending(&common_work), false);

	/* Submit to the cooperative queue */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(&common_work), K_WORK_QUEUED);
	zassert_equal(k_work_is_pending(&common_work), true);

	/* Shouldn't have been started since test thread is
	 * cooperative.
	 */
	zassert_equal(coophi_counter(), 0);

	/* Let it run, then check it finished. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 1);
	zassert_equal(k_work_busy_get(&common_work), 0);

	/* Flush the sync state from completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Basic SMP check submitting with a non-blocking handler. */
ZTEST(work, test_smp_simple_queue)
{
	if (!IS_ENABLED(CONFIG_SMP)) {
		ztest_test_skip();
		return;
	}

	int rc;

	/* Reset state and use the non-blocking handler */
	reset_counters();
	k_work_init(&common_work, counter_handler);
	zassert_equal(k_work_busy_get(&common_work), 0);
	zassert_equal(k_work_is_pending(&common_work), false);

	/* Submit to the cooperative queue */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);

	/* It should run and finish without this thread yielding. */
	int64_t ts0 = k_uptime_ticks();
	uint32_t delay;

	do {
		delay = k_ticks_to_ms_floor32(k_uptime_ticks() - ts0);
	} while (k_work_is_pending(&common_work) && (delay < DELAY_MS));

	zassert_equal(k_work_busy_get(&common_work), 0);
	zassert_equal(coophi_counter(), 1);

	/* Flush the sync state from completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Basic single-CPU check submitting with a blocking handler */
ZTEST(work_1cpu, test_1cpu_sync_queue)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init(&common_work, rel_handler);
	zassert_equal(k_work_busy_get(&common_work), 0);

	/* Submit to the cooperative queue */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(&common_work), K_WORK_QUEUED);

	/* Shouldn't have been started since test thread is
	 * cooperative.
	 */
	zassert_equal(coophi_counter(), 0);

	/* Let it run, then check it didn't finish. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 0);
	zassert_equal(k_work_busy_get(&common_work), K_WORK_RUNNING);

	/* Make it ready so it can finish when this thread yields. */
	handler_release();
	zassert_equal(coophi_counter(), 0);

	/* Wait for then verify finish */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);
	zassert_equal(coophi_counter(), 1);
}

/* Verify that if a work item is submitted while it is being run by a
 * queue thread it gets submitted to the queue it's running on, to
 * prevent reentrant invocation, at least on a single CPU.
 */
ZTEST(work_1cpu, test_1cpu_reentrant_queue)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init(&common_work, rel_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Release it so it's running and can be rescheduled. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 0);

	/* Resubmit to a different queue. */
	rc = k_work_submit_to_queue(&preempt_queue, &common_work);
	zassert_equal(rc, 2);
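	/* k_work_submit_to_queue() returns 2 when the item was already
	 * running: it has been queued again, on the queue running it.
	 */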

	/* Release the first submission. */
	handler_release();
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);
	zassert_equal(coophi_counter(), 1);

	/* Confirm the second submission was redirected to the running
	 * queue to avoid re-entrancy problems.
	 */
	handler_release();
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);
	zassert_equal(coophi_counter(), 2);
}

/* Single CPU submit two work items and wait for flush in order
 * before they get started.
 */
ZTEST(work_1cpu, test_1cpu_queued_flush)
{
	int rc;

	/* Reset state and use the delaying handler */
	reset_counters();
	k_work_init(&common_work, delay_handler);
	k_work_init(&common_work1, delay_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work1);
	zassert_equal(rc, 1);
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Confirm that it's still in the queue, then wait for completion.
	 * This should wait.
	 */
	zassert_equal(k_work_busy_get(&common_work), K_WORK_QUEUED);
	zassert_equal(k_work_busy_get(&common_work1), K_WORK_QUEUED);
	zassert_true(k_work_flush(&common_work, &work_sync));
	zassert_false(k_work_flush(&common_work1, &work_sync));
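	/* common_work1 was queued ahead of common_work, so flushing
	 * common_work waits for both; the second flush then finds an
	 * idle item and returns false.
	 */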

	/* Verify completion. */
	zassert_equal(coophi_counter(), 2);
	zassert_true(!k_work_is_pending(&common_work));
	zassert_true(!k_work_is_pending(&common_work1));
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* After completion flush should be a no-op */
	zassert_false(k_work_flush(&common_work, &work_sync));
	zassert_false(k_work_flush(&common_work1, &work_sync));
}

/* Single CPU submit a work item and wait for flush after it's started.
 */
ZTEST(work_1cpu, test_1cpu_running_flush)
{
	int rc;

	/* Reset state and use the delaying handler */
	reset_counters();
	k_work_init(&common_work, delay_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);
	zassert_equal(k_work_busy_get(&common_work), K_WORK_QUEUED);

	/* Release it so it's running. */
	k_sleep(K_TICKS(1));
	zassert_equal(k_work_busy_get(&common_work), K_WORK_RUNNING);
	zassert_equal(coophi_counter(), 0);

	/* Wait for completion.  This should be released by the delay
	 * handler.
	 */
	zassert_true(k_work_flush(&common_work, &work_sync));

	/* Verify completion. */
	zassert_equal(coophi_counter(), 1);
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Single CPU schedule a work item and wait for flush. */
ZTEST(work_1cpu, test_1cpu_delayed_flush)
{
	int rc;
	uint32_t flush_ms;
	uint32_t wait_ms;

	/* Reset state and use non-blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, counter_handler);

	/* Flushing an unscheduled item completes immediately. */
	zassert_false(k_work_flush_delayable(&dwork, &work_sync));

	/* Submit to the cooperative queue. */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Align to tick then flush. */
	k_sleep(K_TICKS(1));
	flush_ms = k_uptime_get_32();
	zassert_true(k_work_flush_delayable(&dwork, &work_sync));
	wait_ms = last_handle_ms - flush_ms;
	zassert_true(wait_ms <= 1, "waited %u", wait_ms);

	/* Verify completion. */
	zassert_equal(coophi_counter(), 1);
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Single CPU cancel before work item is unqueued should complete
 * immediately.
 */
ZTEST(work_1cpu, test_1cpu_queued_cancel)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init(&common_work, rel_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Cancellation should complete immediately. */
	zassert_equal(k_work_cancel(&common_work), 0);

	/* Shouldn't have run. */
	zassert_equal(coophi_counter(), 0);
}

/* Single CPU cancel before work item is unqueued should not wait. */
ZTEST(work_1cpu, test_1cpu_queued_cancel_sync)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init(&common_work, rel_handler);

	/* Cancelling an unqueued work item should not affect the work
	 * and should return false.
	 */
	zassert_false(k_work_cancel_sync(&common_work, &work_sync));

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Cancellation should complete immediately, indicating that
	 * work was pending.
	 */
	zassert_true(k_work_cancel_sync(&common_work, &work_sync));

	/* Shouldn't have run. */
	zassert_equal(coophi_counter(), 0);
}

/* Single CPU cancel before scheduled work item is queued should
 * complete immediately.
 */
ZTEST(work_1cpu, test_1cpu_delayed_cancel)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, rel_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Cancellation should complete immediately. */
	zassert_equal(k_work_cancel_delayable(&dwork), 0);

	/* Shouldn't have run. */
	zassert_equal(coophi_counter(), 0);
}


/* Single CPU cancel before scheduled work item is queued should not wait. */
ZTEST(work_1cpu, test_1cpu_delayed_cancel_sync)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, rel_handler);

	/* Cancelling an unqueued delayable work item should not affect
	 * the work and should return false.
	 */
	zassert_false(k_work_cancel_delayable_sync(&dwork, &work_sync));

	/* Submit to the cooperative queue. */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Cancellation should complete immediately, indicating that
	 * work was pending.
	 */
	zassert_true(k_work_cancel_delayable_sync(&dwork, &work_sync));

	/* Shouldn't have run. */
	zassert_equal(coophi_counter(), 0);
}

/* Single CPU cancel after delayable item starts should wait. */
ZTEST(work_1cpu, test_1cpu_delayed_cancel_sync_wait)
{
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, rel_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_NO_WAIT);
	zassert_equal(k_work_delayable_busy_get(&dwork), K_WORK_QUEUED);
	zassert_equal(coophi_counter(), 0);

	/* Get it to running, where it will block. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 0);
	zassert_equal(k_work_delayable_busy_get(&dwork), K_WORK_RUNNING);

	/* Schedule to release, then cancel should delay. */
	async_release();
	zassert_true(k_work_cancel_delayable_sync(&dwork, &work_sync));

	/* Verify completion. */
	zassert_equal(coophi_counter(), 1);
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Infrastructure to capture behavior of work item that's being
 * cancelled.
 */
struct test_running_cancel_timer {
	struct k_timer timer;
	struct k_work work;
	int submit_rc;
	int busy_rc;
};

static struct test_running_cancel_timer test_running_cancel_ctx;

static void test_running_cancel_cb(struct k_timer *timer)
{
	struct test_running_cancel_timer *ctx =
		CONTAINER_OF(timer, struct test_running_cancel_timer, timer);

	ctx->busy_rc = k_work_busy_get(&ctx->work);
	ctx->submit_rc = k_work_submit_to_queue(&coophi_queue, &ctx->work);
	handler_release();
}

/* Single CPU test cancellation after work starts. */
ZTEST(work_1cpu, test_1cpu_running_cancel)
{
	struct test_running_cancel_timer *ctx = &test_running_cancel_ctx;
	struct k_work *wp = &ctx->work;
	static const uint32_t ms_timeout = 10;
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init(wp, rel_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, wp);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Release it so it's running. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 0);

	/* Schedule the async process to capture state and release work. */
	ctx->submit_rc = INT_MAX;
	ctx->busy_rc = INT_MAX;
	k_timer_init(&ctx->timer, test_running_cancel_cb, NULL);
	k_timer_start(&ctx->timer, K_MSEC(ms_timeout), K_NO_WAIT);

	/* Cancellation should not complete. */
	zassert_equal(k_work_cancel(wp), K_WORK_RUNNING | K_WORK_CANCELING,
		      NULL);
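	/* k_work_cancel() returns the busy flags still in effect after
	 * the cancellation steps, hence RUNNING | CANCELING here.
	 */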

	/* Handler should not have run. */
	zassert_equal(coophi_counter(), 0);

	/* Busy-wait until the timer expires.  The test thread is
	 * cooperative, so the work queue cannot run and the cancellation
	 * cannot complete while we spin.
	 */
	k_busy_wait(1000 * (ms_timeout + 1));

	zassert_equal(k_timer_status_get(&ctx->timer), 1);

	/* Wait for cancellation to complete. */
	zassert_true(k_work_cancel_sync(wp, &work_sync));

	/* Verify completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* Handler should have detected running and canceling. */
	zassert_equal(ctx->busy_rc, K_WORK_RUNNING | K_WORK_CANCELING);

	/* Attempt to submit while cancelling should have been
	 * rejected.
	 */
	zassert_equal(ctx->submit_rc, -EBUSY);

	/* Post-cancellation should have no flags. */
	rc = k_work_busy_get(wp);
	zassert_equal(rc, 0, "bad: %d", rc);
}

/* Single CPU test wait-for-cancellation after the work item has
 * started running.
 */
ZTEST(work_1cpu, test_1cpu_running_cancel_sync)
{
	struct test_running_cancel_timer *ctx = &test_running_cancel_ctx;
	struct k_work *wp = &ctx->work;
	static const uint32_t ms_timeout = 10;
	int rc;

	/* Reset state and use the blocking handler */
	reset_counters();
	k_work_init(wp, rel_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, wp);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Release it so it's running. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 0);

	/* Schedule the async process to capture state and release work. */
	ctx->submit_rc = INT_MAX;
	ctx->busy_rc = INT_MAX;
	k_timer_init(&ctx->timer, test_running_cancel_cb, NULL);
	k_timer_start(&ctx->timer, K_MSEC(ms_timeout), K_NO_WAIT);

	/* Cancellation should wait. */
	zassert_true(k_work_cancel_sync(wp, &work_sync));

	/* Handler should have run. */
	zassert_equal(coophi_counter(), 1);

	/* Wait out the timer.  The cancellation has already completed
	 * (its callback released the handler), so this just lets the
	 * timer reach its expired state.
	 */
	k_busy_wait(1000 * (ms_timeout + 1));

	zassert_equal(k_timer_status_get(&ctx->timer), 1);

	/* Verify completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* Handler should have detected running and canceling. */
	zassert_equal(ctx->busy_rc, K_WORK_RUNNING | K_WORK_CANCELING,
		      NULL);

	/* Attempt to submit while cancelling should have been
	 * rejected.
	 */
	zassert_equal(ctx->submit_rc, -EBUSY);

	/* Post-cancellation should have no flags. */
	rc = k_work_busy_get(wp);
	zassert_equal(rc, 0, "bad: %d", rc);
}

/* SMP cancel after work item is started should succeed but require
 * wait.
 */
ZTEST(work, test_smp_running_cancel)
{
	int rc;

	if (!IS_ENABLED(CONFIG_SMP)) {
		ztest_test_skip();
		return;
	}

	/* Reset state and use the delaying handler */
	reset_counters();
	k_work_init(&common_work, delay_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);

	/* It should advance to running without this thread yielding. */
	int64_t ts0 = k_uptime_ticks();
	uint32_t delay;

	do {
		delay = k_ticks_to_ms_floor32(k_uptime_ticks() - ts0);
	} while ((k_work_busy_get(&common_work) != K_WORK_RUNNING)
		 && (delay < DELAY_MS));

	/* Cancellation should not succeed immediately because the
	 * work is running.
	 */
	rc = k_work_cancel(&common_work);
	zassert_equal(rc, K_WORK_RUNNING | K_WORK_CANCELING, "rc %x", rc);

	/* Sync should wait. */
	zassert_equal(k_work_cancel_sync(&common_work, &work_sync), true);

	/* Should have completed. */
	zassert_equal(coophi_counter(), 1);
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Drain with no active workers completes immediately. */
ZTEST(work, test_drain_empty)
{
	int rc;

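	/* Draining an idle queue returns 0, indicating that no wait
	 * was needed.
	 */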
	rc = k_work_queue_drain(&coophi_queue, false);
	zassert_equal(rc, 0);
}

struct test_drain_wait_timer {
	struct k_timer timer;
	struct k_work work;
	int submit_rc;
};

static struct test_drain_wait_timer test_drain_wait_ctx;

static void test_drain_wait_cb(struct k_timer *timer)
{
	struct test_drain_wait_timer *ctx =
		CONTAINER_OF(timer, struct test_drain_wait_timer, timer);

	ctx->submit_rc = k_work_submit_to_queue(&coophi_queue, &ctx->work);
}

/* Single CPU submit an item and wait for it to drain. */
ZTEST(work_1cpu, test_1cpu_drain_wait)
{
	struct test_drain_wait_timer *ctx = &test_drain_wait_ctx;
	struct k_work *wp = &ctx->work;
	int rc;

	/* Reset state, allow one re-submission, and use the delaying
	 * handler.
	 */
	reset_counters();
	atomic_set(&resubmits_left, 1);
	k_work_init(wp, delay_handler);

	/* Submit to the cooperative queue. */
	rc = k_work_submit_to_queue(&coophi_queue, wp);
	zassert_equal(rc, 1);
	zassert_equal(coophi_counter(), 0);

	/* Schedule the async process to capture submission state
	 * while draining.
	 */
	ctx->submit_rc = INT_MAX;
	k_timer_init(&ctx->timer, test_drain_wait_cb, NULL);
	k_timer_start(&ctx->timer, K_MSEC(10), K_NO_WAIT);

	/* Wait to drain */
	rc = k_work_queue_drain(&coophi_queue, false);
	zassert_equal(rc, 1);

	/* Wait until timer expires. */
	(void)k_timer_status_sync(&ctx->timer);

	/* Verify completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* Confirm that chained submission worked, and non-chained
	 * submission failed.
	 */
	zassert_equal(coophi_counter(), 2);
	zassert_equal(ctx->submit_rc, -EBUSY);
}

/* Single CPU submit item, drain with plug, test, then unplug. */
ZTEST(work_1cpu, test_1cpu_plugged_drain)
{
	int rc;

	/* Reset state and use the delaying handler. */
	reset_counters();
	k_work_init(&common_work, delay_handler);

	/* Submit to the cooperative queue */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);

	/* Wait to drain, and plug. */
	rc = k_work_queue_drain(&coophi_queue, true);
	zassert_equal(rc, 1);

	/* Verify completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
	zassert_equal(coophi_counter(), 1);

	/* Queue should be plugged */
	zassert_equal(coophi_queue.flags,
		      K_WORK_QUEUE_STARTED
		      | K_WORK_QUEUE_PLUGGED
		      | K_WORK_QUEUE_NO_YIELD,
		      NULL);

	/* Switch to the non-blocking handler. */
	k_work_init(&common_work, counter_handler);

	/* Resubmission should fail because queue is plugged */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, -EBUSY);

	/* Unplug the queue */
	rc = k_work_queue_unplug(&coophi_queue);
	zassert_equal(rc, 0);

	/* Unplugging an already-unplugged queue should not affect it. */
	rc = k_work_queue_unplug(&coophi_queue);
	zassert_equal(rc, -EALREADY);
	zassert_equal(coophi_queue.flags,
		      K_WORK_QUEUE_STARTED | K_WORK_QUEUE_NO_YIELD,
		      NULL);

	/* Resubmission should succeed and complete */
	rc = k_work_submit_to_queue(&coophi_queue, &common_work);
	zassert_equal(rc, 1);

	/* Flush the sync state and verify completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);
	zassert_equal(coophi_counter(), 2);
}

/* Single CPU test delayed submission */
ZTEST(work_1cpu, test_1cpu_basic_schedule)
{
	int rc;
	uint32_t sched_ms;
	uint32_t max_ms = k_ticks_to_ms_ceil32(1U
				+ k_ms_to_ticks_ceil32(DELAY_MS));
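	/* max_ms allows one extra tick of slop: the delay rounds up to
	 * ticks, and expiry may land one tick after alignment.
	 */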
	uint32_t elapsed_ms;
	struct k_work *wp = &dwork.work; /* whitebox testing */

	/* Reset state and use non-blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, counter_handler);

	/* Verify that work is idle and marked delayable. */
	zassert_equal(k_work_busy_get(wp), 0);
	zassert_equal(wp->flags & K_WORK_DELAYABLE, K_WORK_DELAYABLE,
		       NULL);

	/* Align to tick, then schedule after normal delay. */
	k_sleep(K_TICKS(1));
	sched_ms = k_uptime_get_32();
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	rc = k_work_busy_get(wp);
	zassert_equal(rc, K_WORK_DELAYED);
	zassert_equal(k_work_delayable_busy_get(&dwork), rc);
	zassert_equal(k_work_delayable_is_pending(&dwork), true);

	/* Scheduling again does nothing. */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* Wait for completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Make sure it ran and is now idle */
	zassert_equal(coophi_counter(), 1);
	zassert_equal(k_work_busy_get(wp), 0);

	/* Check that the delay is within the expected range. */
	elapsed_ms = last_handle_ms - sched_ms;
	zassert_true(elapsed_ms >= DELAY_MS,
		     "short %u < %u\n", elapsed_ms, DELAY_MS);
	zassert_true(elapsed_ms <= max_ms,
		     "long %u > %u\n", elapsed_ms, max_ms);
}

struct state_1cpu_basic_schedule_running {
	struct k_work_delayable dwork;
	int schedule_res;
};

static void handle_1cpu_basic_schedule_running(struct k_work *work)
{
	struct k_work_delayable *one_dwork = k_work_delayable_from_work(work);
	struct state_1cpu_basic_schedule_running *state
		= CONTAINER_OF(one_dwork, struct state_1cpu_basic_schedule_running,
			       dwork);

	/* Co-opt the resubmits so we can test the schedule API
	 * explicitly.
	 */
	if (atomic_dec(&resubmits_left) > 0) {
		/* Schedule again on current queue */
		state->schedule_res = k_work_schedule_for_queue(one_dwork->work.queue, one_dwork,
								K_MSEC(DELAY_MS));
	} else {
		/* Flag that it didn't schedule */
		state->schedule_res = -EALREADY;
	}

	counter_handler(work);
}

/* Single CPU test that schedules when running */
ZTEST(work_1cpu, test_1cpu_basic_schedule_running)
{
	int rc;
	static struct state_1cpu_basic_schedule_running state = {
		.schedule_res = -1,
	};

	/* Reset state and set for one resubmit.  Use a test-specific
	 * handler.
	 */
	reset_counters();
	atomic_set(&resubmits_left, 1);
	k_work_init_delayable(&state.dwork, handle_1cpu_basic_schedule_running);

	zassert_equal(state.schedule_res, -1);

	rc = k_work_schedule_for_queue(&coophi_queue, &state.dwork,
				       K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);

	zassert_equal(coop_counter(&coophi_queue), 0);

	/* Wait for completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);
	zassert_equal(state.schedule_res, 1);
	zassert_equal(coop_counter(&coophi_queue), 1);

	/* Wait for completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);
	zassert_equal(state.schedule_res, -EALREADY);
	zassert_equal(coop_counter(&coophi_queue), 2);
}

/* Single CPU test schedule without delay is queued immediately. */
ZTEST(work_1cpu, test_1cpu_immed_schedule)
{
	int rc;
	struct k_work *wp = &dwork.work; /* whitebox testing */

	/* Reset state and use the non-blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, counter_handler);
	zassert_equal(k_work_busy_get(wp), 0);

	/* Submit to the cooperative queue */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_NO_WAIT);
	zassert_equal(rc, 1);
	rc = k_work_busy_get(wp);
	zassert_equal(rc, K_WORK_QUEUED);
	zassert_equal(k_work_delayable_busy_get(&dwork), rc);
	zassert_equal(k_work_delayable_is_pending(&dwork), true);

	/* Scheduling again does nothing. */
	rc = k_work_schedule_for_queue(&coophi_queue, &dwork, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* Shouldn't have been started since test thread is
	 * cooperative.
	 */
	zassert_equal(coophi_counter(), 0);

	/* Let it run, then check it finished. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 1);
	zassert_equal(k_work_busy_get(wp), 0);

	/* Flush the sync state from completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

/* Single CPU test that delayed work can be rescheduled. */
ZTEST(work_1cpu, test_1cpu_basic_reschedule)
{
	int rc;
	uint32_t sched_ms;
	uint32_t max_ms = k_ticks_to_ms_ceil32(1U
				+ k_ms_to_ticks_ceil32(DELAY_MS));
	uint32_t elapsed_ms;
	struct k_work *wp = &dwork.work; /* whitebox testing */

	/* Reset state and use non-blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, counter_handler);

	/* Verify that work is idle and marked delayable. */
	zassert_equal(k_work_busy_get(wp), 0);
	zassert_equal(wp->flags & K_WORK_DELAYABLE, K_WORK_DELAYABLE,
		       NULL);

	/* Schedule to the preempt queue after twice the standard
	 * delay.
	 */
	rc = k_work_reschedule_for_queue(&preempt_queue, &dwork,
					  K_MSEC(2U * DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(wp), K_WORK_DELAYED);

	/* Align to tick then reschedule on the cooperative queue for
	 * the standard delay.
	 */
	k_sleep(K_TICKS(1));
	sched_ms = k_uptime_get_32();
	rc = k_work_reschedule_for_queue(&coophi_queue, &dwork,
					  K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(wp), K_WORK_DELAYED);

	/* Wait for completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Make sure it ran on the coop queue and is now idle */
	zassert_equal(coophi_counter(), 1);
	zassert_equal(k_work_busy_get(wp), 0);

	/* Check that the delay is within the expected range. */
	elapsed_ms = last_handle_ms - sched_ms;
	zassert_true(elapsed_ms >= DELAY_MS,
		     "short %u < %u\n", elapsed_ms, DELAY_MS);
	zassert_true(elapsed_ms <= max_ms,
		     "long %u > %u\n", elapsed_ms, max_ms);
}

/* Single CPU test that delayed work can be immediately queued by
 * reschedule API.
 */
ZTEST(work_1cpu, test_1cpu_immed_reschedule)
{
	int rc;
	struct k_work *wp = &dwork.work; /* whitebox testing */

	/* Reset state and use the delay handler */
	reset_counters();
	k_work_init_delayable(&dwork, delay_handler);
	zassert_equal(k_work_busy_get(wp), 0);

	/* Schedule immediately to the cooperative queue */
	rc = k_work_reschedule_for_queue(&coophi_queue, &dwork, K_NO_WAIT);
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(wp), K_WORK_QUEUED);

	/* Shouldn't have been started since test thread is
	 * cooperative.
	 */
	zassert_equal(coophi_counter(), 0);

	/* Let it run, then check it didn't finish. */
	k_sleep(K_TICKS(1));
	zassert_equal(coophi_counter(), 0);
	zassert_equal(k_work_busy_get(wp), K_WORK_RUNNING);

	/* Schedule immediately to the preemptive queue (will divert
	 * to coop since running).
	 */
	rc = k_work_reschedule_for_queue(&preempt_queue, &dwork, K_NO_WAIT);
	zassert_equal(rc, 2);
	zassert_equal(k_work_busy_get(wp), K_WORK_QUEUED | K_WORK_RUNNING,
		      NULL);

	/* Schedule after 3x the delay to the preemptive queue
	 * (will not divert since previous submissions will have
	 * completed).
	 */
	rc = k_work_reschedule_for_queue(&preempt_queue, &dwork,
					  K_MSEC(3 * DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(wp),
		      K_WORK_DELAYED | K_WORK_QUEUED | K_WORK_RUNNING,
		      NULL);

	/* Wait for the original no-wait submission (total 1 delay) */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Check that coop ran once, and work is still delayed and
	 * also running.
	 */
	zassert_equal(coophi_counter(), 1);
	zassert_equal(k_work_busy_get(wp), K_WORK_DELAYED | K_WORK_RUNNING,
		      NULL);

	/* Wait for the queued no-wait submission (total 2 delay) */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Check that it got diverted to coop and ran, and the work is
	 * still delayed.
	 */
	zassert_equal(coophi_counter(), 2);
	zassert_equal(preempt_counter(), 0);
	zassert_equal(k_work_busy_get(wp), K_WORK_DELAYED,
		      NULL);

	/* Wait for the delayed submission (total 3 delay) */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Check that ran on preempt.  In fact we're here because the
	 * test thread is higher priority, so the work will still be
	 * marked running.
	 */
	zassert_equal(coophi_counter(), 2);
	zassert_equal(preempt_counter(), 1);
	zassert_equal(k_work_busy_get(wp), K_WORK_RUNNING,
		      NULL);

	/* Wait for preempt to drain */
	rc = k_work_queue_drain(&preempt_queue, false);
	zassert_equal(rc, 1);
}

/* Test no-yield behavior; returns true if and only if the work queue
 * priority is higher than the test thread priority.
 */
static bool try_queue_no_yield(struct k_work_q *wq)
{
	int rc;
	bool is_high = (k_thread_priority_get(k_work_queue_thread_get(wq))
			< k_thread_priority_get(k_current_get()));
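	/* In Zephyr, numerically lower priority values are higher
	 * priority, so "<" means the queue outranks this thread.
	 */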

	TC_PRINT("Testing no-yield on %s-priority queue\n",
		 is_high ? "high" : "low");
	reset_counters();

	/* Submit two work items directly to the cooperative queue. */

	k_work_init(&common_work, counter_handler);
	k_work_init_delayable(&dwork, counter_handler);

	rc = k_work_submit_to_queue(wq, &common_work);
	zassert_equal(rc, 1);
	rc = k_work_schedule_for_queue(wq, &dwork, K_NO_WAIT);
	zassert_equal(rc, 1);

	/* Wait for completion */
	zassert_equal(k_work_is_pending(&common_work), true);
	zassert_equal(k_work_delayable_is_pending(&dwork), true);
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Because there was no yield both should have run, and
	 * another yield won't cause anything to happen.
	 */
	zassert_equal(coop_counter(wq), 2);
	zassert_equal(k_work_is_pending(&common_work), false);
	zassert_equal(k_work_delayable_is_pending(&dwork), false);

	/* The first give unblocked this thread; we need to consume
	 * the give from the second work task.
	 */
	zassert_equal(k_sem_take(&sync_sem, K_NO_WAIT), 0);

	zassert_equal(k_sem_take(&sync_sem, K_NO_WAIT), -EBUSY);

	return is_high;
}

/* Verify that no-yield policy works */
ZTEST(work_1cpu, test_1cpu_queue_no_yield)
{
	/* This test needs two slots available in the sem! */
	k_sem_init(&sync_sem, 0, 2);
	zassert_equal(try_queue_no_yield(&coophi_queue), true);
	zassert_equal(try_queue_no_yield(&cooplo_queue), false);
	k_sem_init(&sync_sem, 0, 1);
}

/* Basic functionality with the system work queue. */
ZTEST(work_1cpu, test_1cpu_system_queue)
{
	int rc;

	/* Reset state and use the non-blocking handler */
	reset_counters();
	k_work_init(&common_work, counter_handler);
	zassert_equal(k_work_busy_get(&common_work), 0);

	/* Submit to the system queue */
	rc = k_work_submit(&common_work);
	zassert_equal(rc, 1);
	zassert_equal(k_work_busy_get(&common_work), K_WORK_QUEUED);

	/* Shouldn't have been started since test thread is
	 * cooperative.
	 */
	zassert_equal(system_counter(), 0);

	/* Let it run, then check it finished. */
	k_sleep(K_TICKS(1));
	zassert_equal(system_counter(), 1);
	zassert_equal(k_work_busy_get(&common_work), 0);

	/* Flush the sync state from completion */
	rc = k_sem_take(&sync_sem, K_NO_WAIT);
	zassert_equal(rc, 0);
}

ZTEST(work_1cpu, test_1cpu_system_schedule)
{
	int rc;
	uint32_t sched_ms;
	uint32_t max_ms = k_ticks_to_ms_ceil32(1U
				+ k_ms_to_ticks_ceil32(DELAY_MS));
	uint32_t elapsed_ms;

	/* Reset state and use non-blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, counter_handler);

	/* Verify that work is idle and marked delayable. */
	zassert_equal(k_work_delayable_busy_get(&dwork), 0);
	zassert_equal(dwork.work.flags & K_WORK_DELAYABLE, K_WORK_DELAYABLE,
		       NULL);

	/* Align to tick, then schedule after normal delay. */
	k_sleep(K_TICKS(1));
	sched_ms = k_uptime_get_32();
	rc = k_work_schedule(&dwork, K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(k_work_delayable_busy_get(&dwork), K_WORK_DELAYED);

	/* Scheduling again does nothing. */
	rc = k_work_schedule(&dwork, K_NO_WAIT);
	zassert_equal(rc, 0);

	/* Wait for completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Make sure it ran and is now idle */
	zassert_equal(system_counter(), 1);
	zassert_equal(k_work_delayable_busy_get(&dwork), 0);

	/* Check that the delay is within the expected range. */
	elapsed_ms = last_handle_ms - sched_ms;
	zassert_true(elapsed_ms >= DELAY_MS,
		     "short %u < %u\n", elapsed_ms, DELAY_MS);
	zassert_true(elapsed_ms <= max_ms,
		     "long %u > %u\n", elapsed_ms, max_ms);
}

ZTEST(work_1cpu, test_1cpu_system_reschedule)
{
	int rc;
	uint32_t sched_ms;
	uint32_t max_ms = k_ticks_to_ms_ceil32(1U
				+ k_ms_to_ticks_ceil32(DELAY_MS));
	uint32_t elapsed_ms;

	/* Reset state and use non-blocking handler */
	reset_counters();
	k_work_init_delayable(&dwork, counter_handler);

	/* Verify that work is idle and marked delayable. */
	zassert_equal(k_work_delayable_busy_get(&dwork), 0);
	zassert_equal(dwork.work.flags & K_WORK_DELAYABLE, K_WORK_DELAYABLE,
		       NULL);

	/* Schedule to the system queue after twice the standard
	 * delay.
	 */
	rc = k_work_reschedule(&dwork, K_MSEC(2U * DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(k_work_delayable_busy_get(&dwork), K_WORK_DELAYED);

	/* Align to tick then reschedule on the system queue for
	 * the standard delay.
	 */
	k_sleep(K_TICKS(1));
	sched_ms = k_uptime_get_32();
	rc = k_work_reschedule(&dwork, K_MSEC(DELAY_MS));
	zassert_equal(rc, 1);
	zassert_equal(k_work_delayable_busy_get(&dwork), K_WORK_DELAYED);

	/* Wait for completion */
	rc = k_sem_take(&sync_sem, K_FOREVER);
	zassert_equal(rc, 0);

	/* Make sure it ran on the system queue and is now idle */
	zassert_equal(system_counter(), 1);
	zassert_equal(k_work_delayable_busy_get(&dwork), 0);

	/* Check that the delay is within the expected range. */
	elapsed_ms = last_handle_ms - sched_ms;
	zassert_true(elapsed_ms >= DELAY_MS,
		     "short %u < %u\n", elapsed_ms, DELAY_MS);
	zassert_true(elapsed_ms <= max_ms,
		     "long %u > %u\n", elapsed_ms, max_ms);
}

ZTEST(work, test_nop)
{
	ztest_test_skip();
}

void *workq_setup(void)
{
	main_thread = k_current_get();
	k_sem_init(&sync_sem, 0, 1);
	k_sem_init(&rel_sem, 0, 1);

	test_work_init();
	test_delayable_init();

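	/* Both suites share this setup; run_flag guards so the queues
	 * are started only once.
	 */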
	if (run_flag) {
		test_queue_start();
		run_flag = false;
	}

	return NULL;
}

ZTEST_SUITE(work_1cpu, NULL, workq_setup, ztest_simple_1cpu_before, ztest_simple_1cpu_after, NULL);
ZTEST_SUITE(work, NULL, workq_setup, NULL, NULL, NULL);