/*
 * Copyright (c) 2022-2025 Nordic Semiconductor ASA
 * Copyright (c) 2023 Codecoup
 *
 * SPDX-License-Identifier: Apache-2.0
 */
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#include <zephyr/fff.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/sys_clock.h>
#include <zephyr/sys/slist.h>

#include "mock_kernel.h"

/* List of fakes used by this unit tester */
#define FFF_FAKES_LIST(FAKE)                                                                       \
	FAKE(z_timeout_remaining)                                                                  \
	FAKE(k_work_cancel_delayable_sync)                                                         \
	FAKE(k_sem_take)                                                                           \
	FAKE(k_sem_give)

/* List of k_work items waiting to be executed. */
static sys_slist_t work_pending;

DEFINE_FAKE_VALUE_FUNC(k_ticks_t, z_timeout_remaining, const struct _timeout *);
DEFINE_FAKE_VALUE_FUNC(bool, k_work_cancel_delayable_sync, struct k_work_delayable *,
		       struct k_work_sync *);
DEFINE_FAKE_VALUE_FUNC(int, k_sem_take, struct k_sem *, k_timeout_t);
DEFINE_FAKE_VOID_FUNC(k_sem_give, struct k_sem *);

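/* Mock of k_work_init_delayable(); only the handler is stored. */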
void k_work_init_delayable(struct k_work_delayable *dwork, k_work_handler_t handler)
{
	dwork->work.handler = handler;
}

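/* Mock of k_work_reschedule(). A zero delay runs the handler immediately;
 * otherwise the item is (re)armed on the work_pending list until k_sleep()
 * consumes enough ticks.
 */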
int k_work_reschedule(struct k_work_delayable *dwork, k_timeout_t delay)
{
	bool on_list = false;
	struct k_work *work;

	dwork->timeout.dticks = delay.ticks;

	/* Determine whether the work item is queued already. */
	SYS_SLIST_FOR_EACH_CONTAINER(&work_pending, work, node) {
		on_list = work == &dwork->work;
		if (on_list) {
			break;
		}
	}

	if (dwork->timeout.dticks == 0) {
		dwork->work.handler(&dwork->work);
		if (on_list) {
			/* The item may sit anywhere in the list, so search for
			 * it instead of assuming it is at the head.
			 */
			(void)sys_slist_find_and_remove(&work_pending, &dwork->work.node);
		}
	} else if (!on_list) {
		/* Mark the item as delayed so that k_sleep() applies the tick
		 * countdown before invoking the handler.
		 */
		dwork->work.flags |= K_WORK_DELAYED;
		sys_slist_append(&work_pending, &dwork->work.node);
	}

	return 0;
}

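/* Mock of k_work_schedule(). Unlike k_work_reschedule(), this is a no-op for
 * an item that is already queued.
 */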
int k_work_schedule(struct k_work_delayable *dwork, k_timeout_t delay)
{
	struct k_work *work;

	/* Determine whether the work item is queued already. */
	SYS_SLIST_FOR_EACH_CONTAINER(&work_pending, work, node) {
		if (work == &dwork->work) {
			return 0;
		}
	}

	dwork->timeout.dticks = delay.ticks;
	if (dwork->timeout.dticks == 0) {
		dwork->work.handler(&dwork->work);
	} else {
		/* Mark the item as delayed so that k_sleep() applies the tick
		 * countdown before invoking the handler.
		 */
		dwork->work.flags |= K_WORK_DELAYED;
		sys_slist_append(&work_pending, &dwork->work.node);
	}

	return 0;
}

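/* Mock of k_work_cancel_delayable(); drops the item from the pending list. */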
int k_work_cancel_delayable(struct k_work_delayable *dwork)
{
	(void)sys_slist_find_and_remove(&work_pending, &dwork->work.node);

	return 0;
}

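/* Mock of k_work_cancel(); drops the item from the pending list. */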
int k_work_cancel(struct k_work *work)
{
	(void)sys_slist_find_and_remove(&work_pending, &work->node);

	return 0;
}

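/* Mock of k_work_init(); only the handler is stored. */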
void k_work_init(struct k_work *work, k_work_handler_t handler)
{
	work->handler = handler;
}

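/* Mock of k_work_submit(); runs the handler synchronously in the caller's
 * context instead of queueing it.
 */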
int k_work_submit(struct k_work *work)
{
	work->handler(work);

	return 0;
}

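/* Mock of k_work_busy_get(); stub that always reports the item as idle. */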
int k_work_busy_get(const struct k_work *work)
{
	return 0;
}

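/* Mock of k_sleep(). Instead of sleeping, advance every pending delayed work
 * by the requested number of ticks and run each handler that expires.
 */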
int32_t k_sleep(k_timeout_t timeout)
{
	struct k_work *work, *tmp;

	/* Use the SAFE variant, as nodes may be removed while iterating. */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&work_pending, work, tmp, node) {
		if (work->flags & K_WORK_DELAYED) {
			struct k_work_delayable *dwork = k_work_delayable_from_work(work);

			if (dwork->timeout.dticks > timeout.ticks) {
				dwork->timeout.dticks -= timeout.ticks;
				continue;
			}
		}

		/* The item may sit anywhere in the list, so search for it
		 * instead of assuming it is at the head.
		 */
		(void)sys_slist_find_and_remove(&work_pending, &work->node);
		work->handler(work);
	}

	return 0;
}

/* Single flag shared by all struct k_mutex instances: this mock tracks only
 * one mutex at a time.
 */
static bool mutex_locked;

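/* Mock of k_mutex_init(); resets the shared lock flag. */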
int k_mutex_init(struct k_mutex *mutex)
{
	mutex_locked = false;

	return 0;
}

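/* Mock of k_mutex_lock(). Fails immediately when the lock is already held:
 * -EBUSY for K_NO_WAIT, -EAGAIN (timeout) otherwise.
 */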
int k_mutex_lock(struct k_mutex *mutex, k_timeout_t timeout)
{
	if (mutex_locked) {
		if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
			return -EBUSY;
		} else {
			/* There is no other thread to release the lock, so a
			 * blocking attempt can only time out.
			 */
			return -EAGAIN;
		}
	}

	mutex_locked = true;

	return 0;
}

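/* Mock of k_mutex_unlock(); -EINVAL mirrors unlocking a mutex that is not
 * currently held.
 */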
int k_mutex_unlock(struct k_mutex *mutex)
{
	if (!mutex_locked) {
		return -EINVAL;
	}

	mutex_locked = false;

	return 0;
}

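/* Reset all fakes and the pending-work list; intended for the test setup
 * hook.
 */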
void mock_kernel_init(void)
{
	FFF_FAKES_LIST(RESET_FAKE);

	sys_slist_init(&work_pending);
}

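/* Flush the pending-work list so that no handler leaks into the next test;
 * intended for the test teardown hook.
 */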
void mock_kernel_cleanup(void)
{
	struct k_work *work, *tmp;

	/* Run all pending work items. */
	SYS_SLIST_FOR_EACH_CONTAINER_SAFE(&work_pending, work, tmp, node) {
		(void)sys_slist_remove(&work_pending, NULL, &work->node);
		work->handler(work);
	}
}
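
/*
 * Sketch of how a test might drive these mocks. The suite, test, and handler
 * names below are illustrative only and are not defined in this file:
 *
 *	static void work_handler(struct k_work *work) { ... }
 *
 *	ZTEST(mock_kernel_suite, test_delayed_work)
 *	{
 *		struct k_work_delayable dwork;
 *
 *		mock_kernel_init();
 *
 *		k_work_init_delayable(&dwork, work_handler);
 *		(void)k_work_schedule(&dwork, K_TICKS(10));
 *
 *		(void)k_sleep(K_TICKS(5));	// 5 ticks left, handler not run
 *		(void)k_sleep(K_TICKS(5));	// countdown expires, handler runs
 *
 *		mock_kernel_cleanup();
 *	}
 */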