// © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
//
// SPDX-License-Identifier: BSD-3-Clause

#include <assert.h>
#include <hyptypes.h>

#include <hypcontainers.h>

#include <atomic.h>
#include <compiler.h>
#include <cpulocal.h>
#include <ipi.h>
#include <list.h>
#include <object.h>
#include <panic.h>
#include <partition.h>
#include <platform_cpu.h>
#include <platform_timer.h>
#include <preempt.h>
#include <spinlock.h>
#include <timer_queue.h>
#include <util.h>

#include <events/timer.h>

#include "event_handlers.h"

CPULOCAL_DECLARE_STATIC(timer_queue_t, timer_queue);

void
timer_handle_boot_cold_init(cpu_index_t boot_cpu_index)
{
        // Initialise all timer queues here, as online CPUs may try to move
        // timers to CPUs that have not booted yet. Secondary CPUs will be
        // set online by the power_cpu_online() handler.
        for (cpu_index_t cpu_index = 0U; cpu_index < PLATFORM_MAX_CORES;
             cpu_index++) {
                timer_queue_t *tq = &CPULOCAL_BY_INDEX(timer_queue, cpu_index);
                spinlock_init(&tq->lock);
                list_init(&tq->list);
                tq->timeout = TIMER_INVALID_TIMEOUT;
                tq->online = (cpu_index == boot_cpu_index);
        }
}

#if !defined(UNITTESTS) || !UNITTESTS
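// Export the platform timer frequency to the root VM via the hypervisor
// environment data.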
void
timer_handle_rootvm_init(hyp_env_data_t *hyp_env)
{
        hyp_env->timer_freq = timer_get_timer_frequency();
}
#endif

uint32_t
timer_get_timer_frequency(void)
{
        return platform_timer_get_frequency();
}

ticks_t
timer_get_current_timer_ticks(void)
{
        return platform_timer_get_current_ticks();
}

ticks_t
timer_convert_ns_to_ticks(nanoseconds_t ns)
{
        return platform_timer_convert_ns_to_ticks(ns);
}

nanoseconds_t
timer_convert_ticks_to_ns(ticks_t ticks)
{
        return platform_timer_convert_ticks_to_ns(ticks);
}

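// Ordering callback used when inserting timers into a queue's list: sorts
// timers in ascending order of timeout, so the head of the list is always
// the timer that expires first.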
static bool
is_timeout_a_smaller_than_b(list_node_t *node_a, list_node_t *node_b)
{
        ticks_t timeout_a = timer_container_of_list_node(node_a)->timeout;
        ticks_t timeout_b = timer_container_of_list_node(node_b)->timeout;

        return timeout_a < timeout_b;
}

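// Initialise a timer object with the given action. The timer starts out
// unqueued, with an invalid timeout.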
void
timer_init_object(timer_t *timer, timer_action_t action)
{
        assert(timer != NULL);

        timer->timeout = TIMER_INVALID_TIMEOUT;
        timer->action = action;
        atomic_init(&timer->queue, NULL);
}

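// Return true if the timer is currently on some CPU's timer queue.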
bool
timer_is_queued(timer_t *timer)
{
        assert(timer != NULL);

        return atomic_load_relaxed(&timer->queue) != NULL;
}

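// Return the earliest timeout queued on the calling CPU's timer queue, or
// TIMER_INVALID_TIMEOUT if the queue is empty.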
ticks_t
timer_queue_get_next_timeout(void)
{
        timer_queue_t *tq = &CPULOCAL(timer_queue);
        ticks_t timeout;

        spinlock_acquire_nopreempt(&tq->lock);
        timeout = tq->timeout;
        spinlock_release_nopreempt(&tq->lock);

        return timeout;
}

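// Program the local hardware timer to match the queue's current timeout, or
// cancel it if the queue is empty. The caller must hold the queue lock, and
// the queue must belong to the calling CPU.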
static void
timer_update_timeout(timer_queue_t *tq) REQUIRE_SPINLOCK(tq->lock)
{
        assert_preempt_disabled();
        assert(tq == &CPULOCAL(timer_queue));

        if (tq->timeout != TIMER_INVALID_TIMEOUT) {
                platform_timer_set_timeout(tq->timeout);
        } else {
                platform_timer_cancel_timeout();
        }
}

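// Insert a timer into the calling CPU's queue with the given timeout. The
// timer must not already be queued; the caller must hold the queue lock.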
static void
timer_enqueue_internal(timer_queue_t *tq, timer_t *timer, ticks_t timeout)
        REQUIRE_SPINLOCK(tq->lock)
{
        assert_preempt_disabled();
        assert(tq == &CPULOCAL(timer_queue));
        assert(tq->online);

        // Set the timer's queue pointer. We need acquire ordering to ensure we
        // observe any previous dequeues on other CPUs.
        timer_queue_t *old_tq = NULL;
        if (!atomic_compare_exchange_strong_explicit(&timer->queue, &old_tq, tq,
                                                     memory_order_acquire,
                                                     memory_order_relaxed)) {
                // This timer is already queued; it is the caller's
                // responsibility to avoid this.
                panic("Request to enqueue a timer that is already queued");
        }

        // There is no need to check if the timeout is already in the past, as
        // the timer module generates a level-triggered interrupt if the timer
        // condition is already met.
        timer->timeout = timeout;

        bool new_head = list_insert_in_order(&tq->list, &timer->list_node,
                                             is_timeout_a_smaller_than_b);
        if (new_head) {
                tq->timeout = timeout;
                timer_update_timeout(tq);
        }
}

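// Remove a timer from the given queue, unless it has already been dequeued
// or moved elsewhere. Returns true if the queue's earliest timeout changed;
// in that case the caller is responsible for reprogramming the hardware
// timer if the queue belongs to the calling CPU.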
static bool
timer_dequeue_internal(timer_queue_t *tq, timer_t *timer)
        REQUIRE_SPINLOCK(tq->lock)
{
        assert_preempt_disabled();

        bool new_timeout = false;

        // The timer may have expired between loading the timer's queue and
        // acquiring its lock. Ensure the timer's queue has not changed before
        // dequeuing.
        if (compiler_expected(atomic_load_relaxed(&timer->queue) == tq)) {
                bool new_head = list_delete_node(&tq->list, &timer->list_node);
                if (new_head) {
                        list_node_t *head = list_get_head(&tq->list);
                        tq->timeout =
                                timer_container_of_list_node(head)->timeout;
                        new_timeout = true;
                } else if (list_is_empty(&tq->list)) {
                        tq->timeout = TIMER_INVALID_TIMEOUT;
                        new_timeout = true;
                } else {
                        // The queue's timeout has not changed.
                }

                // Clear the timer's queue pointer. We need release ordering to
                // ensure this dequeue is observed by the next enqueue.
                atomic_store_release(&timer->queue, NULL);
        }

        return new_timeout;
}

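// Change the timeout of a timer that is already queued on the calling CPU's
// queue, reprogramming the hardware timer if the queue's head changes. The
// caller must hold the queue lock.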
static void
timer_update_internal(timer_queue_t *tq, timer_t *timer, ticks_t timeout)
        REQUIRE_SPINLOCK(tq->lock)
{
        assert_preempt_disabled();
        assert(tq == &CPULOCAL(timer_queue));
        assert(tq->online);

        if (compiler_unexpected(tq != atomic_load_relaxed(&timer->queue))) {
                // There is a race with timer updates; it is the caller's
                // responsibility to prevent this.
                panic("Request to update a timer that is not queued on this CPU");
        }

        if (compiler_expected(timer->timeout != timeout)) {
                // There is no need to check if the timeout is already in the
                // past, as the timer module generates a level-triggered
                // interrupt if the timer condition is already met.

                // Remove the timer from the queue, update its timeout, and
                // re-insert it in order.

                bool new_head_delete =
                        list_delete_node(&tq->list, &timer->list_node);

                timer->timeout = timeout;

                bool new_head_insert =
                        list_insert_in_order(&tq->list, &timer->list_node,
                                             is_timeout_a_smaller_than_b);

                if (new_head_delete || new_head_insert) {
                        list_node_t *head = list_get_head(&tq->list);
                        tq->timeout =
                                timer_container_of_list_node(head)->timeout;
                        timer_update_timeout(tq);
                }
        }
}

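// Queue a timer on the calling CPU with the given absolute timeout (in
// ticks). The timer must not already be queued.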
void
timer_enqueue(timer_t *timer, ticks_t timeout)
{
        assert(timer != NULL);

        preempt_disable();

        timer_queue_t *tq = &CPULOCAL(timer_queue);

        spinlock_acquire_nopreempt(&tq->lock);
        timer_enqueue_internal(tq, timer, timeout);
        spinlock_release_nopreempt(&tq->lock);

        preempt_enable();
}

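// Remove a timer from whichever CPU's queue it is on, if any. Safe to call
// for a timer that is not queued or that expires concurrently.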
void
timer_dequeue(timer_t *timer)
{
        assert(timer != NULL);

        timer_queue_t *tq = atomic_load_relaxed(&timer->queue);

        if (tq != NULL) {
                spinlock_acquire(&tq->lock);
                if (timer_dequeue_internal(tq, timer) &&
                    (tq == &CPULOCAL(timer_queue))) {
                        timer_update_timeout(tq);
                }
                spinlock_release(&tq->lock);
        }
}

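// Set a new timeout for a timer, enqueuing it on the calling CPU. If the
// timer is already queued on another CPU it is migrated to the local queue.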
void
timer_update(timer_t *timer, ticks_t timeout)
{
        assert(timer != NULL);

        preempt_disable();

        timer_queue_t *old_tq = atomic_load_relaxed(&timer->queue);
        timer_queue_t *new_tq = &CPULOCAL(timer_queue);

        // If the timer is queued on another CPU, it needs to be dequeued.
        if ((old_tq != NULL) && (old_tq != new_tq)) {
                spinlock_acquire_nopreempt(&old_tq->lock);
                (void)timer_dequeue_internal(old_tq, timer);
                spinlock_release_nopreempt(&old_tq->lock);
        }

        spinlock_acquire_nopreempt(&new_tq->lock);
        if (old_tq == new_tq) {
                timer_update_internal(new_tq, timer, timeout);
        } else {
                timer_enqueue_internal(new_tq, timer, timeout);
        }
        spinlock_release_nopreempt(&new_tq->lock);

        preempt_enable();
}

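// Pop and handle every timer on the local queue whose timeout has passed,
// triggering each timer's action, then reprogram the hardware timer for the
// next pending timeout (if any). The queue lock is dropped while each
// timer's action is triggered.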
static void
timer_dequeue_expired(void) REQUIRE_PREEMPT_DISABLED
{
        ticks_t current_ticks = timer_get_current_timer_ticks();
        timer_queue_t *tq = &CPULOCAL(timer_queue);

        assert_preempt_disabled();

        spinlock_acquire_nopreempt(&tq->lock);

        while (tq->timeout <= current_ticks) {
                list_node_t *head = list_get_head(&tq->list);
                timer_t *timer = timer_container_of_list_node(head);
                (void)timer_dequeue_internal(tq, timer);
                spinlock_release_nopreempt(&tq->lock);
                (void)trigger_timer_action_event(timer->action, timer);
                spinlock_acquire_nopreempt(&tq->lock);
        }

        timer_update_timeout(tq);
        spinlock_release_nopreempt(&tq->lock);
}

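// Platform timer interrupt handler: process any timers on the local queue
// that have now expired.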
void
timer_handle_platform_timer_expiry(void)
{
        timer_dequeue_expired();
}

error_t
timer_handle_power_cpu_suspend(void)
{
        // TODO: Delay or reject attempted suspend if timeout is due to expire
        // sooner than the CPU can reach the requested power state.

#if defined(MODULE_CORE_TIMER_LP) && MODULE_CORE_TIMER_LP
        // The timer_lp module will enqueue the timeout on the global low power
        // timer, so we can cancel the core-local timer to avoid redundant
        // interrupts if the suspend finishes without entering a state that
        // stops the timer.
        platform_timer_cancel_timeout();
#endif

        return OK;
}

// Also handles power_cpu_resume
void
timer_handle_power_cpu_online(void)
{
        timer_dequeue_expired();

        // Mark this CPU's timer queue as online
        timer_queue_t *tq = &CPULOCAL(timer_queue);
        assert_preempt_disabled();
        spinlock_acquire_nopreempt(&tq->lock);
        tq->online = true;
        spinlock_release_nopreempt(&tq->lock);
}

// A timer_queue operation has occurred that requires synchronisation, so
// process our timer queue: handle any timers that may have expired since
// they were queued on this CPU, and reprogram the platform timer if
// required.
bool NOINLINE
timer_handle_ipi_received(void)
{
        timer_dequeue_expired();

        return true;
}

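// Try to migrate an unqueued timer onto the target CPU's queue. Returns
// false if the target queue is offline. If the insertion changes the head of
// the target queue, an IPI is sent so the target CPU reprograms its local
// hardware timer.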
static bool
timer_try_move_to_cpu(timer_t *timer, cpu_index_t target)
        REQUIRE_PREEMPT_DISABLED
{
        bool moved = false;
        timer_queue_t *ttq = &CPULOCAL_BY_INDEX(timer_queue, target);

        assert_preempt_disabled();

        spinlock_acquire_nopreempt(&ttq->lock);

        // We can only use online CPU timer queues
        if (ttq->online) {
                // Update the timer's queue to be on the new CPU
                timer_queue_t *old_ttq = NULL;
                if (!atomic_compare_exchange_strong_explicit(
                            &timer->queue, &old_ttq, ttq, memory_order_acquire,
                            memory_order_relaxed)) {
                        panic("Request to move timer that is already queued");
                }

                // Send an IPI if the queue head changed, so the target CPU
                // can update its local timer
                bool new_head =
                        list_insert_in_order(&ttq->list, &timer->list_node,
                                             is_timeout_a_smaller_than_b);
                if (new_head) {
                        ttq->timeout = timer->timeout;
                        spinlock_release_nopreempt(&ttq->lock);
                        ipi_one(IPI_REASON_TIMER_QUEUE_SYNC, target);
                } else {
                        spinlock_release_nopreempt(&ttq->lock);
                }
                moved = true;
        } else {
                spinlock_release_nopreempt(&ttq->lock);
        }

        return moved;
}

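// CPU offline handler: mark the local timer queue offline and migrate every
// queued timer to another CPU whose queue is still online.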
void
timer_handle_power_cpu_offline(void)
{
        // Try to move any timers to the next CPU up from this one.
        // If this is the last core, wrap around.
        cpu_index_t our_index = cpulocal_get_index();
        cpu_index_t start =
                (cpu_index_t)((our_index + 1U) % PLATFORM_MAX_CORES);
        timer_queue_t *tq = &CPULOCAL(timer_queue);

        assert_preempt_disabled();
        spinlock_acquire_nopreempt(&tq->lock);

        // Mark this CPU's timer queue as going down and cancel any pending
        // timeout on the local hardware timer.
        tq->online = false;
        platform_timer_cancel_timeout();

        // Move all active timers in this CPU's timer queue to an online CPU
        while (tq->timeout != TIMER_INVALID_TIMEOUT) {
                list_node_t *head = list_get_head(&tq->list);
                timer_t *timer = timer_container_of_list_node(head);

                // Remove timer from this core.
                (void)timer_dequeue_internal(tq, timer);
                spinlock_release_nopreempt(&tq->lock);

                // The target core might go down while we are searching, so
                // always check whether the target queue is still online. We
                // try the last successfully used CPU first, in the hope that
                // its queue is still online. If no online timer queue can be
                // found we panic; in practice at least one CPU timer queue
                // should always be online.
                bool found_target = false;
                cpu_index_t target = start;
                while (!found_target) {
                        if (platform_cpu_exists(target)) {
                                if (timer_try_move_to_cpu(timer, target)) {
                                        found_target = true;
                                        start = target;
                                        break;
                                }
                        }

                        // Skip our own CPU, as we know it is going down. This
                        // can happen if the previously saved core is now down
                        // and the search wrapped around.
                        target = (cpu_index_t)((target + 1U) %
                                               PLATFORM_MAX_CORES);
                        if (target == our_index) {
                                target = (cpu_index_t)((target + 1U) %
                                                       PLATFORM_MAX_CORES);
                        }
                        if (target == start) {
                                // We looped around without finding a target;
                                // this should never happen.
                                break;
                        }
                }

                if (!found_target) {
                        panic("Could not find target CPU for timer migration");
                }

                // Take the lock back to check the next timer.
                spinlock_acquire_nopreempt(&tq->lock);
        }

        spinlock_release_nopreempt(&tq->lock);
}