1 /*
2 * Copyright (c) 2008-2014 Travis Geiselbrecht
3 *
4 * Use of this source code is governed by a MIT-style
5 * license that can be found in the LICENSE file or at
6 * https://opensource.org/licenses/MIT
7 */
8
9 /**
10 * @file
11 * @brief Kernel timer subsystem
12 * @defgroup timer Timers
13 *
14 * The timer subsystem allows functions to be scheduled for later
15 * execution. Each timer object is used to cause one function to
16 * be executed at a later time.
17 *
18 * Timer callback functions are called in interrupt context.
19 *
20 * @{
21 */
22 #include <kernel/timer.h>
23
24 #include <assert.h>
25 #include <kernel/debug.h>
26 #include <kernel/spinlock.h>
27 #include <kernel/thread.h>
28 #include <lk/debug.h>
29 #include <lk/list.h>
30 #include <lk/trace.h>
31 #include <platform.h>
32 #include <platform/timer.h>
33
34 #define LOCAL_TRACE 0
35
/* global lock protecting every per-cpu timer queue below */
spin_lock_t timer_lock;

/* per-cpu pending-timer bookkeeping; __CPU_ALIGN presumably pads each
 * instance to a cache line to avoid false sharing between cpus -- confirm
 * against the arch definition. */
struct timer_state {
    struct list_node timer_queue;   /* timers sorted by ascending scheduled_time */
} __CPU_ALIGN;

/* one queue per possible cpu; indexed by arch_curr_cpu_num() */
static struct timer_state timers[SMP_MAX_CPUS];

/* interrupt-time handler, defined below; forward-declared so timer_set/
 * timer_cancel can hand it to the platform timer layer */
static enum handler_return timer_tick(void *arg, lk_time_t now);
45
46 /**
47 * @brief Initialize a timer object
48 */
timer_initialize(timer_t * timer)49 void timer_initialize(timer_t *timer) {
50 *timer = (timer_t)TIMER_INITIAL_VALUE(*timer);
51 }
52
/*
 * Insert a timer into the given cpu's queue, keeping the queue sorted by
 * ascending scheduled_time. Caller must hold timer_lock with interrupts
 * disabled.
 */
static void insert_timer_in_queue(uint cpu, timer_t *timer) {
    DEBUG_ASSERT(arch_ints_disabled());

    LTRACEF("timer %p, cpu %u, scheduled %u, periodic %u\n", timer, cpu, timer->scheduled_time, timer->periodic_time);

    /* slot in just before the first entry that expires later than us */
    timer_t *e;
    list_for_every_entry(&timers[cpu].timer_queue, e, timer_t, node) {
        if (TIME_GT(e->scheduled_time, timer->scheduled_time)) {
            list_add_before(&e->node, &timer->node);
            return;
        }
    }

    /* walked off the end of the list */
    list_add_tail(&timers[cpu].timer_queue, &timer->node);
}
70
/*
 * Arm a timer on the current cpu's queue.
 *
 * @a delay is the ms until the first firing, @a period the ms between
 * subsequent firings (0 for one-shot). Panics if the timer is already
 * queued. On dynamic-timer platforms, reprograms the hardware timer when
 * the new entry becomes the soonest deadline.
 */
static void timer_set(timer_t *timer, lk_time_t delay, lk_time_t period, timer_callback callback, void *arg) {
    LTRACEF("timer %p, delay %u, period %u, callback %p, arg %p\n", timer, delay, period, callback, arg);

    DEBUG_ASSERT(timer->magic == TIMER_MAGIC);

    if (list_in_list(&timer->node)) {
        panic("timer %p already in list\n", timer);
    }

    /* fill in the timer before taking the lock; it is not yet visible to
     * anyone else */
    const lk_time_t now = current_time();
    timer->scheduled_time = now + delay;
    timer->periodic_time = period;
    timer->callback = callback;
    timer->arg = arg;

    LTRACEF("scheduled time %u\n", timer->scheduled_time);

    spin_lock_saved_state_t irqstate;
    spin_lock_irqsave(&timer_lock, irqstate);

    const uint cpu = arch_curr_cpu_num();
    insert_timer_in_queue(cpu, timer);

#if PLATFORM_HAS_DYNAMIC_TIMER
    if (list_peek_head_type(&timers[cpu].timer_queue, timer_t, node) == timer) {
        /* we just modified the head of the timer queue */
        LTRACEF("setting new timer for %u msecs\n", delay);
        platform_set_oneshot_timer(timer_tick, NULL, delay);
    }
#endif

    spin_unlock_irqrestore(&timer_lock, irqstate);
}
106
107 /**
108 * @brief Set up a timer that executes once
109 *
110 * This function specifies a callback function to be called after a specified
111 * delay. The function will be called one time.
112 *
113 * @param timer The timer to use
114 * @param delay The delay, in ms, before the timer is executed
115 * @param callback The function to call when the timer expires
116 * @param arg The argument to pass to the callback
117 *
118 * The timer function is declared as:
119 * enum handler_return callback(struct timer *, lk_time_t now, void *arg) { ... }
120 */
timer_set_oneshot(timer_t * timer,lk_time_t delay,timer_callback callback,void * arg)121 void timer_set_oneshot(timer_t *timer, lk_time_t delay, timer_callback callback, void *arg) {
122 if (delay == 0)
123 delay = 1;
124 timer_set(timer, delay, 0, callback, arg);
125 }
126
127 /**
128 * @brief Set up a timer that executes repeatedly
129 *
130 * This function specifies a callback function to be called after a specified
131 * delay. The function will be called repeatedly.
132 *
133 * @param timer The timer to use
134 * @param period The delay, in ms, between timer executions (first execution occurs one period after timer set)
135 * @param callback The function to call when the timer expires
136 * @param arg The argument to pass to the callback
137 *
138 * The timer function is declared as:
139 * enum handler_return callback(struct timer *, lk_time_t now, void *arg) { ... }
140 */
timer_set_periodic(timer_t * timer,lk_time_t period,timer_callback callback,void * arg)141 void timer_set_periodic(timer_t *timer, lk_time_t period, timer_callback callback, void *arg) {
142 if (period == 0)
143 period = 1;
144 timer_set(timer, period, period, callback, arg);
145 }
146
/**
 * @brief Cancel a pending timer
 *
 * Removes @a timer from the queue (if queued) and clears its callback and
 * periodic interval so a concurrent periodic expiration cannot requeue it.
 * Safe to call on a timer that is not currently scheduled.
 */
void timer_cancel(timer_t *timer) {
    DEBUG_ASSERT(timer->magic == TIMER_MAGIC);

    spin_lock_saved_state_t state;
    spin_lock_irqsave(&timer_lock, state);

#if PLATFORM_HAS_DYNAMIC_TIMER
    uint cpu = arch_curr_cpu_num();

    /* snapshot the current head so we can tell below whether removing the
     * timer changed the next deadline. NOTE(review): this looks at the
     * *current* cpu's queue only -- cancelling a timer that was armed on a
     * different cpu will not reprogram that cpu's hardware timer; confirm
     * callers only cancel on the arming cpu. */
    timer_t *oldhead = list_peek_head_type(&timers[cpu].timer_queue, timer_t, node);
#endif

    if (list_in_list(&timer->node))
        list_delete(&timer->node);

    /* to keep it from being reinserted into the queue if called from
     * periodic timer callback.
     */
    timer->periodic_time = 0;
    timer->callback = NULL;
    timer->arg = NULL;

#if PLATFORM_HAS_DYNAMIC_TIMER
    /* see if we've just modified the head of the timer queue */
    timer_t *newhead = list_peek_head_type(&timers[cpu].timer_queue, timer_t, node);
    if (newhead == NULL) {
        LTRACEF("clearing old hw timer, nothing in the queue\n");
        platform_stop_timer();
    } else if (newhead != oldhead) {
        lk_time_t delay;
        lk_time_t now = current_time();

        /* the new head may already be due; fire as soon as possible */
        if (TIME_LT(newhead->scheduled_time, now))
            delay = 0;
        else
            delay = newhead->scheduled_time - now;

        LTRACEF("setting new timer to %u\n", (uint) delay);
        platform_set_oneshot_timer(timer_tick, NULL, delay);
    }
#endif

    spin_unlock_irqrestore(&timer_lock, state);
}
194
/* called at interrupt time to process any pending timers */
static enum handler_return timer_tick(void *arg, lk_time_t now) {
    timer_t *timer;
    enum handler_return ret = INT_NO_RESCHEDULE;

    DEBUG_ASSERT(arch_ints_disabled());

    THREAD_STATS_INC(timer_ints);
    // KEVLOG_TIMER_TICK(); // enable only if necessary

    uint cpu = arch_curr_cpu_num();

    LTRACEF("cpu %u now %u, sp %p\n", cpu, now, __GET_FRAME());

    spin_lock(&timer_lock);

    /* drain every timer whose deadline has passed */
    for (;;) {
        /* see if there's an event to process */
        timer = list_peek_head_type(&timers[cpu].timer_queue, timer_t, node);
        if (likely(timer == 0))
            break;
        LTRACEF("next item on timer queue %p at %u now %u (%p, arg %p)\n", timer, timer->scheduled_time, now, timer->callback, timer->arg);
        /* queue is sorted, so if the head is still in the future we're done */
        if (likely(TIME_LT(now, timer->scheduled_time)))
            break;

        /* process it */
        LTRACEF("timer %p\n", timer);
        DEBUG_ASSERT(timer && timer->magic == TIMER_MAGIC);
        list_delete(&timer->node);

        /* we pulled it off the list, release the list lock to handle it */
        spin_unlock(&timer_lock);

        LTRACEF("dequeued timer %p, scheduled %u periodic %u\n", timer, timer->scheduled_time, timer->periodic_time);

        THREAD_STATS_INC(timers);

        /* snapshot before running the callback: timer_cancel() called from
         * inside the callback zeroes periodic_time while the lock is dropped */
        bool periodic = timer->periodic_time > 0;

        LTRACEF("timer %p firing callback %p, arg %p\n", timer, timer->callback, timer->arg);
        KEVLOG_TIMER_CALL(timer->callback, timer->arg);
        /* callback runs in interrupt context without the timer lock held */
        if (timer->callback(timer, now, timer->arg) == INT_RESCHEDULE)
            ret = INT_RESCHEDULE;

        /* it may have been requeued or periodic, grab the lock so we can safely inspect it */
        spin_lock(&timer_lock);

        /* if it was a periodic timer and it hasn't been requeued
         * by the callback put it back in the list
         */
        if (periodic && !list_in_list(&timer->node) && timer->periodic_time > 0) {
            LTRACEF("periodic timer, period %u\n", timer->periodic_time);
            timer->scheduled_time += timer->periodic_time;
            /* if we fell behind by more than one period, resynchronize to
             * now instead of firing a burst of catch-up callbacks */
            if (unlikely(TIME_LT(timer->scheduled_time, now))) {
                timer->scheduled_time = now + timer->periodic_time;
            }
            insert_timer_in_queue(cpu, timer);
        }
    }

#if PLATFORM_HAS_DYNAMIC_TIMER
    /* reset the timer to the next event */
    timer = list_peek_head_type(&timers[cpu].timer_queue, timer_t, node);
    if (timer) {
        /* has to be the case or it would have fired already */
        DEBUG_ASSERT(TIME_GT(timer->scheduled_time, now));

        lk_time_t delay = timer->scheduled_time - now;

        LTRACEF("setting new timer for %u msecs for event %p\n", (uint)delay, timer);
        platform_set_oneshot_timer(timer_tick, NULL, delay);
    }

    /* we're done manipulating the timer queue */
    spin_unlock(&timer_lock);
#else
    /* release the timer lock before calling the tick handler */
    spin_unlock(&timer_lock);

    /* let the scheduler have a shot to do quantum expiration, etc */
    /* in case of dynamic timer, the scheduler will set up a periodic timer */
    if (thread_timer_tick(NULL, now, NULL) == INT_RESCHEDULE)
        ret = INT_RESCHEDULE;
#endif

    return ret;
}
282
timer_init(void)283 void timer_init(void) {
284 timer_lock = SPIN_LOCK_INITIAL_VALUE;
285 for (uint i = 0; i < SMP_MAX_CPUS; i++) {
286 list_initialize(&timers[i].timer_queue);
287 }
288 #if !PLATFORM_HAS_DYNAMIC_TIMER
289 /* register for a periodic timer tick */
290 platform_set_periodic_timer(timer_tick, NULL, 10); /* 10ms */
291 #endif
292 }
293