/*
 * Copyright (c) 2023 Antmicro <www.antmicro.com>
 * Copyright (c) 2025, Ambiq Micro Inc. <www.ambiq.com>
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT ambiq_stimer

/**
 * @file
 * @brief Ambiq Apollo STIMER-based sys_clock driver
 *
 */

#include <zephyr/init.h>
#include <zephyr/kernel.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/sys_clock.h>
#include <zephyr/irq.h>
#include <zephyr/spinlock.h>

/* ambiq-sdk includes */
#include <soc.h>

#define COUNTER_MAX UINT32_MAX

#define CYC_PER_TICK (sys_clock_hw_cycles_per_sec() / CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define MAX_TICKS ((k_ticks_t)(COUNTER_MAX / CYC_PER_TICK) - 1)
#define MAX_CYCLES (MAX_TICKS * CYC_PER_TICK)
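/* Minimum delta, in STIMER cycles, that the driver keeps between the current counter value
 * and a newly armed compare value (see stimer_isr() and sys_clock_set_timeout()).
 */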
#if defined(CONFIG_SOC_SERIES_APOLLO3X) || defined(CONFIG_SOC_SERIES_APOLLO5X)
#define MIN_DELAY 1
#else
#define MIN_DELAY 4
#endif

#if defined(CONFIG_SOC_SERIES_APOLLO5X)
#define COMPARE_INTERRUPT AM_HAL_STIMER_INT_COMPAREA
#else
/* A clock glitch could, in rare cases, cause the STIMER compare interrupt to be lost.
 * Set up a backup comparator to handle this case.
 */
#define COMPARE_INTERRUPT (AM_HAL_STIMER_INT_COMPAREA | AM_HAL_STIMER_INT_COMPAREB)
#endif

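/* The backup COMPAREB interrupt is assumed to sit on the interrupt line immediately
 * following COMPAREA.
 */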
#define COMPAREA_IRQ (DT_INST_IRQN(0))
#define COMPAREB_IRQ (COMPAREA_IRQ + 1)

#define TIMER_CLKSRC (DT_INST_PROP(0, clk_source))

#if defined(CONFIG_TEST)
const int32_t z_sys_timer_irq_for_test = COMPAREA_IRQ;
#endif

/* Elapsed ticks since the previous kernel tick was announced. It is accumulated every time
 * stimer_isr is triggered or the sys_clock_set_timeout/sys_clock_elapsed API is called,
 * and it is cleared after sys_clock_announce is called.
 */
static uint32_t g_tick_elapsed;

/* Value of the STIMER counter when the previous timer API was called; this value is
 * aligned to a tick boundary. It is updated along with the g_tick_elapsed value.
 */
static uint32_t g_last_time_stamp;

/* Spinlock to synchronize the compare ISR with updates of the compare registers */
static struct k_spinlock g_lock;

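/* Advance g_last_time_stamp to the most recent tick boundary and accumulate the ticks that
 * have elapsed since the previous update into g_tick_elapsed.
 */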
static void update_tick_counter(void)
{
	/* Read current cycle count. */
	uint32_t now = am_hal_stimer_counter_get();

	/* If the current cycle count is smaller than the last time stamp, a counter overflow
	 * happened. We need to extend the current counter value to 64 bits and add 0xFFFFFFFF
	 * to it to get the correct elapsed cycles.
	 */
	uint64_t now_64 = (g_last_time_stamp <= now) ? (uint64_t)now : (uint64_t)now + COUNTER_MAX;

	/* Get elapsed cycles */
	uint32_t elapsed_cycle = (now_64 - g_last_time_stamp);

	/* Get elapsed ticks. */
	uint32_t dticks = elapsed_cycle / CYC_PER_TICK;

	g_last_time_stamp += dticks * CYC_PER_TICK;
	g_tick_elapsed += dticks;
}

static void ambiq_stimer_delta_set(uint32_t ui32Delta)
{
	am_hal_stimer_compare_delta_set(0, ui32Delta);
#if !defined(CONFIG_SOC_SERIES_APOLLO5X)
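	/* Also arm the backup comparator B one cycle later than comparator A, so that a tick
	 * is still delivered if the compare A interrupt is lost.
	 */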
	am_hal_stimer_compare_delta_set(1, ui32Delta + 1);
#endif
}

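/* Compare interrupt handler: accumulates the elapsed ticks, re-arms the next tick interrupt
 * when the tickless kernel is disabled, and announces the elapsed ticks to the kernel.
 */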
void stimer_isr(const void *arg)
{
	ARG_UNUSED(arg);

	uint32_t irq_status = am_hal_stimer_int_status_get(false);

	if (irq_status & COMPARE_INTERRUPT) {
		am_hal_stimer_int_clear(COMPARE_INTERRUPT);

		k_spinlock_key_t key = k_spin_lock(&g_lock);

		/* Calculate the elapsed ticks based on the current cycle count. */
		update_tick_counter();

		if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {

			/* Get the counter value to trigger the next tick interrupt. */
			uint64_t next = (uint64_t)g_last_time_stamp + CYC_PER_TICK;

			/* Read current cycle count. */
			uint32_t now = am_hal_stimer_counter_get();

			/* If current cycle count is smaller than the last time stamp, a counter
			 * overflow happened. We need to extend the current counter value to 64 bits
			 * and add 0xFFFFFFFF to get the correct elapsed cycles.
			 */
			uint64_t now_64 = (g_last_time_stamp <= now) ? (uint64_t)now
								     : (uint64_t)now + COUNTER_MAX;

			uint32_t delta = (now_64 + MIN_DELAY < next) ? (next - now_64) : MIN_DELAY;

			/* Set delta. */
			ambiq_stimer_delta_set(delta);
		}

		k_spin_unlock(&g_lock, key);

		sys_clock_announce(g_tick_elapsed);
		g_tick_elapsed = 0;
	}
}

void sys_clock_set_timeout(int32_t ticks, bool idle)
{
	ARG_UNUSED(idle);

	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return;
	}

	/* Adjust the ticks to the range of [1, MAX_TICKS]. */
	ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS : ticks;
	ticks = CLAMP(ticks, 1, (int32_t)MAX_TICKS);

	k_spinlock_key_t key = k_spin_lock(&g_lock);

	/* Update the internal tick counter. */
	update_tick_counter();

	/* Get the current hardware counter value. */
	uint32_t now = am_hal_stimer_counter_get();

	/* last: the last recorded counter value.
	 * now_64: current counter value, extended to uint64_t to ease the handling of hardware
	 * counter overflow.
	 * next: counter value at which to trigger the scheduled timeout.
	 * last < now_64 < next
	 */
	uint64_t last = (uint64_t)g_last_time_stamp;
	uint64_t now_64 = (g_last_time_stamp <= now) ? (uint64_t)now : (uint64_t)now + COUNTER_MAX;
	uint64_t next = now_64 + ticks * CYC_PER_TICK;

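	/* Round the requested timeout down to a whole number of ticks measured from the last
	 * recorded tick boundary, so that the compare interrupt always fires on a tick boundary.
	 */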
	uint32_t gap = next - last;
	uint32_t gap_aligned = (gap / CYC_PER_TICK) * CYC_PER_TICK;
	uint64_t next_aligned = last + gap_aligned;

	uint32_t delta = next_aligned - now_64;

	if (delta <= MIN_DELAY) {
		/* If the delta is not larger than MIN_DELAY, trigger an interrupt immediately. */
		am_hal_stimer_int_set(COMPARE_INTERRUPT);
	} else {
		ambiq_stimer_delta_set(delta);
	}

	k_spin_unlock(&g_lock, key);
}

uint32_t sys_clock_elapsed(void)
{
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		return 0;
	}

	k_spinlock_key_t key = k_spin_lock(&g_lock);
	update_tick_counter();
	k_spin_unlock(&g_lock, key);

	return g_tick_elapsed;
}

uint32_t sys_clock_cycle_get_32(void)
{
	return am_hal_stimer_counter_get();
}

static int stimer_init(void)
{
	uint32_t oldCfg;

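	/* Apply AM_HAL_STIMER_CFG_FREEZE while capturing the previous STIMER configuration,
	 * then reprogram it below with the devicetree clock source and the compare engine(s)
	 * enabled.
	 */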
	oldCfg = am_hal_stimer_config(TIMER_CLKSRC | AM_HAL_STIMER_CFG_FREEZE);

#if defined(CONFIG_SOC_SERIES_APOLLO3X)
	am_hal_stimer_config((oldCfg & ~(AM_HAL_STIMER_CFG_FREEZE | CTIMER_STCFG_CLKSEL_Msk)) |
			     TIMER_CLKSRC | AM_HAL_STIMER_CFG_COMPARE_A_ENABLE |
			     AM_HAL_STIMER_CFG_COMPARE_B_ENABLE);
#elif defined(CONFIG_SOC_SERIES_APOLLO4X)
	am_hal_stimer_config((oldCfg & ~(AM_HAL_STIMER_CFG_FREEZE | STIMER_STCFG_CLKSEL_Msk)) |
			     TIMER_CLKSRC | AM_HAL_STIMER_CFG_COMPARE_A_ENABLE |
			     AM_HAL_STIMER_CFG_COMPARE_B_ENABLE);
#elif defined(CONFIG_SOC_SERIES_APOLLO5X)
	/* No need for a backup comparator any more */
	am_hal_stimer_config((oldCfg & ~(AM_HAL_STIMER_CFG_FREEZE | STIMER_STCFG_CLKSEL_Msk)) |
			     TIMER_CLKSRC | AM_HAL_STIMER_CFG_COMPARE_A_ENABLE);
#endif
	g_last_time_stamp = am_hal_stimer_counter_get();

	/* A clock glitch could, in rare cases, cause the STIMER compare interrupt to be lost.
	 * Set up a backup comparator interrupt to handle this case.
	 */
	NVIC_ClearPendingIRQ(COMPAREA_IRQ);
	IRQ_CONNECT(COMPAREA_IRQ, 0, stimer_isr, 0, 0);
	irq_enable(COMPAREA_IRQ);
#if !defined(CONFIG_SOC_SERIES_APOLLO5X)
	NVIC_ClearPendingIRQ(COMPAREB_IRQ);
	IRQ_CONNECT(COMPAREB_IRQ, 0, stimer_isr, 0, 0);
	irq_enable(COMPAREB_IRQ);
#endif
	am_hal_stimer_int_enable(COMPARE_INTERRUPT);
	/* Start timer with period CYC_PER_TICK if tickless is not enabled */
	if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
		ambiq_stimer_delta_set(CYC_PER_TICK);
	}
	return 0;
}

SYS_INIT(stimer_init, PRE_KERNEL_2, CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);