/*
 * Copyright (c) 2018 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/kernel.h>
#include <zephyr/spinlock.h>
#include <ksched.h>
#include <timeout_q.h>
#include <zephyr/internal/syscall_handler.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>

static uint64_t curr_tick;

static sys_dlist_t timeout_list = SYS_DLIST_STATIC_INIT(&timeout_list);

/*
 * The timeout code shall take no locks other than its own (timeout_lock), nor
 * shall it call any other subsystem while holding this lock.
 */
static struct k_spinlock timeout_lock;

/* Ticks left to process in the currently-executing sys_clock_announce() */
static int announce_remaining;

#if defined(CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME)
unsigned int z_clock_hw_cycles_per_sec = CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC;

#ifdef CONFIG_USERSPACE
static inline unsigned int z_vrfy_sys_clock_hw_cycles_per_sec_runtime_get(void)
{
	return z_impl_sys_clock_hw_cycles_per_sec_runtime_get();
}
#include <zephyr/syscalls/sys_clock_hw_cycles_per_sec_runtime_get_mrsh.c>
#endif /* CONFIG_USERSPACE */
#endif /* CONFIG_TIMER_READS_ITS_FREQUENCY_AT_RUNTIME */

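/* Return the timeout at the head of the delta-sorted timeout list, or
 * NULL if no timeouts are pending.
 */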
static struct _timeout *first(void)
{
	sys_dnode_t *t = sys_dlist_peek_head(&timeout_list);

	return (t == NULL) ? NULL : CONTAINER_OF(t, struct _timeout, node);
}

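/* Return the timeout following t in the timeout list, or NULL if t is
 * the last entry.
 */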
static struct _timeout *next(struct _timeout *t)
{
	sys_dnode_t *n = sys_dlist_peek_next(&timeout_list, &t->node);

	return (n == NULL) ? NULL : CONTAINER_OF(n, struct _timeout, node);
}

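/* Unlink a timeout from the list. Because the list stores delta ticks,
 * the removed entry's remaining delta is folded into its successor so
 * that later expiration times are unchanged.
 */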
static void remove_timeout(struct _timeout *t)
{
	if (next(t) != NULL) {
		next(t)->dticks += t->dticks;
	}

	sys_dlist_remove(&t->node);
}

static int32_t elapsed(void)
{
	/* While sys_clock_announce() is executing, new relative timeouts are
	 * scheduled relative to the currently firing timeout's original tick
	 * value (== curr_tick) rather than relative to the current
	 * sys_clock_elapsed().
	 *
	 * This means that timeouts scheduled from within timeout callbacks
	 * will be placed at well-defined offsets from the currently firing
	 * timeout.
	 *
	 * As a side effect, the same happens if a higher-priority ISR
	 * preempts a timeout callback and schedules a timeout.
	 *
	 * The distinction is made by checking announce_remaining, which is
	 * non-zero while sys_clock_announce() is executing and zero
	 * otherwise.
	 */
	return announce_remaining == 0 ? sys_clock_elapsed() : 0U;
}

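/* Compute the number of ticks until the first pending timeout expires,
 * adjusted for ticks that have already elapsed since the last
 * announcement. Returns SYS_CLOCK_MAX_WAIT if no timeout is pending or
 * the adjusted value exceeds INT_MAX.
 */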
static int32_t next_timeout(int32_t ticks_elapsed)
{
	struct _timeout *to = first();
	int32_t ret;

	if ((to == NULL) ||
	    ((int64_t)(to->dticks - ticks_elapsed) > (int64_t)INT_MAX)) {
		ret = SYS_CLOCK_MAX_WAIT;
	} else {
		ret = MAX(0, to->dticks - ticks_elapsed);
	}

	return ret;
}

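/* Add a timeout to the delta list and reprogram the system timer if it
 * becomes the earliest pending timeout. K_FOREVER timeouts are never
 * queued.
 */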
k_ticks_t z_add_timeout(struct _timeout *to, _timeout_func_t fn, k_timeout_t timeout)
{
	k_ticks_t ticks = 0;

	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		return 0;
	}

#ifdef CONFIG_KERNEL_COHERENCE
	__ASSERT_NO_MSG(arch_mem_coherent(to));
#endif /* CONFIG_KERNEL_COHERENCE */

	__ASSERT(!sys_dnode_is_linked(&to->node), "");
	to->fn = fn;

	K_SPINLOCK(&timeout_lock) {
		struct _timeout *t;
		int32_t ticks_elapsed;
		bool has_elapsed = false;

		if (Z_IS_TIMEOUT_RELATIVE(timeout)) {
			ticks_elapsed = elapsed();
			has_elapsed = true;
			to->dticks = timeout.ticks + 1 + ticks_elapsed;
			ticks = curr_tick + to->dticks;
		} else {
			k_ticks_t dticks = Z_TICK_ABS(timeout.ticks) - curr_tick;

			to->dticks = MAX(1, dticks);
			ticks = timeout.ticks;
		}

		for (t = first(); t != NULL; t = next(t)) {
			if (t->dticks > to->dticks) {
				t->dticks -= to->dticks;
				sys_dlist_insert(&t->node, &to->node);
				break;
			}
			to->dticks -= t->dticks;
		}

		if (t == NULL) {
			sys_dlist_append(&timeout_list, &to->node);
		}

		if (to == first() && announce_remaining == 0) {
			if (!has_elapsed) {
				/* For an absolute timeout that is now first to
				 * expire, the elapsed tick count must be read
				 * from the system clock.
				 */
				ticks_elapsed = elapsed();
			}
			sys_clock_set_timeout(next_timeout(ticks_elapsed), false);
		}
	}

	return ticks;
}

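/* Remove a still-pending timeout from the list, reprogramming the
 * system timer if it was the earliest one. Returns 0 on success or
 * -EINVAL if the timeout was not active.
 */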
int z_abort_timeout(struct _timeout *to)
{
	int ret = -EINVAL;

	K_SPINLOCK(&timeout_lock) {
		if (sys_dnode_is_linked(&to->node)) {
			bool is_first = (to == first());

			remove_timeout(to);
			to->dticks = TIMEOUT_DTICKS_ABORTED;
			ret = 0;
			if (is_first) {
				sys_clock_set_timeout(next_timeout(elapsed()), false);
			}
		}
	}

	return ret;
}

/* Must be called with timeout_lock held */
static k_ticks_t timeout_rem(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	for (struct _timeout *t = first(); t != NULL; t = next(t)) {
		ticks += t->dticks;
		if (timeout == t) {
			break;
		}
	}

	return ticks;
}

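/* Return the number of ticks remaining until the timeout expires,
 * measured from the current time, or 0 if the timeout is inactive.
 */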
k_ticks_t z_timeout_remaining(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	K_SPINLOCK(&timeout_lock) {
		if (!z_is_inactive_timeout(timeout)) {
			ticks = timeout_rem(timeout) - elapsed();
		}
	}

	return ticks;
}

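/* Return the absolute tick at which the timeout will expire. For an
 * inactive timeout this is simply the last announced tick count.
 */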
k_ticks_t z_timeout_expires(const struct _timeout *timeout)
{
	k_ticks_t ticks = 0;

	K_SPINLOCK(&timeout_lock) {
		ticks = curr_tick;
		if (!z_is_inactive_timeout(timeout)) {
			ticks += timeout_rem(timeout);
		}
	}

	return ticks;
}

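/* Return the number of ticks until the next pending timeout expires,
 * or SYS_CLOCK_MAX_WAIT if none is pending.
 */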
int32_t z_get_next_timeout_expiry(void)
{
	int32_t ret = (int32_t) K_TICKS_FOREVER;

	K_SPINLOCK(&timeout_lock) {
		ret = next_timeout(elapsed());
	}
	return ret;
}

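/* Timer driver callback announcing that 'ticks' ticks have elapsed.
 * Expired timeout callbacks are invoked with timeout_lock released,
 * after which the tick counter is advanced and the timer reprogrammed
 * for the next pending timeout.
 */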
void sys_clock_announce(int32_t ticks)
{
	k_spinlock_key_t key = k_spin_lock(&timeout_lock);

	/* We release the lock around the callbacks below, so on SMP
	 * systems someone might already be running the loop. Don't
	 * race (which would cause parallel execution of "sequential"
	 * timeouts and confuse apps); just increment the tick count
	 * and return.
	 */
	if (IS_ENABLED(CONFIG_SMP) && (announce_remaining != 0)) {
		announce_remaining += ticks;
		k_spin_unlock(&timeout_lock, key);
		return;
	}

	announce_remaining = ticks;

	struct _timeout *t;

	for (t = first();
	     (t != NULL) && (t->dticks <= announce_remaining);
	     t = first()) {
		int dt = t->dticks;

		curr_tick += dt;
		t->dticks = 0;
		remove_timeout(t);

		k_spin_unlock(&timeout_lock, key);
		t->fn(t);
		key = k_spin_lock(&timeout_lock);
		announce_remaining -= dt;
	}

	if (t != NULL) {
		t->dticks -= announce_remaining;
	}

	curr_tick += announce_remaining;
	announce_remaining = 0;

	sys_clock_set_timeout(next_timeout(0), false);

	k_spin_unlock(&timeout_lock, key);

#ifdef CONFIG_TIMESLICING
	z_time_slice();
#endif /* CONFIG_TIMESLICING */
}

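/* Return the current 64-bit kernel tick count, including ticks that
 * have elapsed since the last announcement.
 */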
int64_t sys_clock_tick_get(void)
{
	uint64_t t = 0U;

	K_SPINLOCK(&timeout_lock) {
		t = curr_tick + elapsed();
	}
	return t;
}

uint32_t sys_clock_tick_get_32(void)
{
#ifdef CONFIG_TICKLESS_KERNEL
	return (uint32_t)sys_clock_tick_get();
#else
	return (uint32_t)curr_tick;
#endif /* CONFIG_TICKLESS_KERNEL */
}

int64_t z_impl_k_uptime_ticks(void)
{
	return sys_clock_tick_get();
}

#ifdef CONFIG_USERSPACE
static inline int64_t z_vrfy_k_uptime_ticks(void)
{
	return z_impl_k_uptime_ticks();
}
#include <zephyr/syscalls/k_uptime_ticks_mrsh.c>
#endif /* CONFIG_USERSPACE */

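/* Convert a k_timeout_t into an absolute timepoint: UINT64_MAX for
 * K_FOREVER, 0 for K_NO_WAIT, otherwise the absolute tick at which the
 * timeout expires.
 */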
k_timepoint_t sys_timepoint_calc(k_timeout_t timeout)
{
	k_timepoint_t timepoint;

	if (K_TIMEOUT_EQ(timeout, K_FOREVER)) {
		timepoint.tick = UINT64_MAX;
	} else if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		timepoint.tick = 0;
	} else {
		k_ticks_t dt = timeout.ticks;

		if (Z_IS_TIMEOUT_RELATIVE(timeout)) {
			timepoint.tick = sys_clock_tick_get() + MAX(1, dt);
		} else {
			timepoint.tick = Z_TICK_ABS(dt);
		}
	}

	return timepoint;
}

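/* Convert a timepoint back into a k_timeout_t relative to the current
 * tick. The K_FOREVER and K_NO_WAIT sentinels are preserved, and
 * already-expired timepoints map to K_NO_WAIT.
 */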
k_timeout_t sys_timepoint_timeout(k_timepoint_t timepoint)
{
	uint64_t now, remaining;

	if (timepoint.tick == UINT64_MAX) {
		return K_FOREVER;
	}
	if (timepoint.tick == 0) {
		return K_NO_WAIT;
	}

	now = sys_clock_tick_get();
	remaining = (timepoint.tick > now) ? (timepoint.tick - now) : 0;
	return K_TICKS(remaining);
}

#ifdef CONFIG_ZTEST
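/* Test-only hook: force the kernel tick counter to a specific value. */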
void z_impl_sys_clock_tick_set(uint64_t tick)
{
	curr_tick = tick;
}

void z_vrfy_sys_clock_tick_set(uint64_t tick)
{
	z_impl_sys_clock_tick_set(tick);
}
#endif /* CONFIG_ZTEST */