/*
 * Copyright (c) 2020 Libre Solar Technologies GmbH
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/task_wdt/task_wdt.h>

#include <zephyr/drivers/watchdog.h>
#include <zephyr/sys/reboot.h>
#include <zephyr/device.h>
#include <errno.h>

#define LOG_LEVEL CONFIG_WDT_LOG_LEVEL
#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(task_wdt);

/*
 * This dummy channel is used to continue feeding the hardware watchdog if the
 * task watchdog timeouts are too long for regular updates
 */
#define TASK_WDT_BACKGROUND_CHANNEL UINTPTR_MAX

/*
 * Task watchdog channel data
 */
struct task_wdt_channel {
	/* period in milliseconds used to reset the timeout, set to 0 to
	 * indicate that the channel is available
	 */
	uint32_t reload_period;
	/* abs. ticks when this channel expires (updated by task_wdt_feed) */
	int64_t timeout_abs_ticks;
	/* user data passed to the callback function */
	void *user_data;
	/* function to be called when the watchdog timer expires */
	task_wdt_callback_t callback;
};

/* array of all task watchdog channels */
static struct task_wdt_channel channels[CONFIG_TASK_WDT_CHANNELS];
static struct k_spinlock channels_lock;

/* timer used for watchdog handling */
static struct k_timer timer;

/* Tell whether the Task Watchdog has been fully initialized. */
static bool task_wdt_initialized;

#ifdef CONFIG_TASK_WDT_HW_FALLBACK
/* pointer to the hardware watchdog used as a fallback */
static const struct device *hw_wdt_dev;
static int hw_wdt_channel;
static bool hw_wdt_started;
#endif

static void schedule_next_timeout(int64_t current_ticks)
{
	uintptr_t next_channel_id; /* channel which will time out next */
	int64_t next_timeout; /* timeout in absolute ticks of this channel */

#ifdef CONFIG_TASK_WDT_HW_FALLBACK
	next_channel_id = TASK_WDT_BACKGROUND_CHANNEL;
	next_timeout = current_ticks +
		k_ms_to_ticks_ceil64(CONFIG_TASK_WDT_MIN_TIMEOUT);
#else
	next_channel_id = 0;
	next_timeout = INT64_MAX;
#endif

	/* find minimum timeout of all channels */
	for (int id = 0; id < ARRAY_SIZE(channels); id++) {
		if (channels[id].reload_period != 0 &&
		    channels[id].timeout_abs_ticks < next_timeout) {
			next_channel_id = id;
			next_timeout = channels[id].timeout_abs_ticks;
		}
	}

	/* update task wdt kernel timer */
	k_timer_user_data_set(&timer, (void *)next_channel_id);
	k_timer_start(&timer, K_TIMEOUT_ABS_TICKS(next_timeout), K_FOREVER);

#ifdef CONFIG_TASK_WDT_HW_FALLBACK
	if (hw_wdt_started) {
		wdt_feed(hw_wdt_dev, hw_wdt_channel);
	}
#endif
}

/**
 * @brief Task watchdog timer callback.
 *
 * If the device operates as intended, this function will never be called,
 * as the timer is continuously restarted with the next due timeout in the
 * task_wdt_feed() function.
 *
 * If all task watchdogs have longer timeouts than the hardware watchdog,
 * this function is called regularly (via the background channel). This
 * should be avoided by setting CONFIG_TASK_WDT_MIN_TIMEOUT to the minimum
 * task watchdog timeout used in the application.
 *
 * @param timer_id Pointer to the timer which called the function
 */
static void task_wdt_trigger(struct k_timer *timer_id)
{
	uintptr_t channel_id = (uintptr_t)k_timer_user_data_get(timer_id);
	bool bg_channel = IS_ENABLED(CONFIG_TASK_WDT_HW_FALLBACK) &&
			  (channel_id == TASK_WDT_BACKGROUND_CHANNEL);

	/* If the timeout expired for the background channel (so the hardware
	 * watchdog needs to be fed) or for a channel that has been deleted,
	 * only schedule a new timeout (the hardware watchdog, if used, will be
	 * fed right after that new timeout is scheduled).
	 */
	if (bg_channel || channels[channel_id].reload_period == 0) {
		schedule_next_timeout(sys_clock_tick_get());
		return;
	}

	if (channels[channel_id].callback) {
		channels[channel_id].callback(channel_id,
			channels[channel_id].user_data);
	} else {
		sys_reboot(SYS_REBOOT_COLD);
	}
}

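/*
 * Illustrative note (a sketch under stated assumptions, not part of this
 * module): if the shortest reload period an application passes to
 * task_wdt_add() is e.g. 100 ms, the advice above translates to settings
 * like the following in the application's prj.conf, so the background
 * channel rarely fires. The 100 ms value is an assumption for the example:
 *
 *   CONFIG_TASK_WDT=y
 *   CONFIG_TASK_WDT_HW_FALLBACK=y
 *   CONFIG_TASK_WDT_MIN_TIMEOUT=100
 */
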
int task_wdt_init(const struct device *hw_wdt)
{
	if (hw_wdt) {
#ifdef CONFIG_TASK_WDT_HW_FALLBACK
		struct wdt_timeout_cfg wdt_config;

		wdt_config.flags = WDT_FLAG_RESET_SOC;
		wdt_config.window.min = 0U;
		wdt_config.window.max = CONFIG_TASK_WDT_MIN_TIMEOUT +
			CONFIG_TASK_WDT_HW_FALLBACK_DELAY;
		wdt_config.callback = NULL;

		hw_wdt_dev = hw_wdt;
		hw_wdt_channel = wdt_install_timeout(hw_wdt_dev, &wdt_config);
		if (hw_wdt_channel < 0) {
			LOG_ERR("hw_wdt install timeout failed: %d", hw_wdt_channel);
			return hw_wdt_channel;
		}
#else
		return -ENOTSUP;
#endif
	}

	k_timer_init(&timer, task_wdt_trigger, NULL);
	schedule_next_timeout(sys_clock_tick_get());

	task_wdt_initialized = true;

	return 0;
}

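/*
 * Example usage (a minimal sketch, not part of this module): initializing
 * the task watchdog with a hardware watchdog obtained from devicetree. The
 * "watchdog0" alias is an assumption and depends on the board; pass NULL
 * instead to run without the hardware fallback:
 *
 *   const struct device *const hw_wdt = DEVICE_DT_GET(DT_ALIAS(watchdog0));
 *
 *   int err = task_wdt_init(hw_wdt);
 *   if (err != 0) {
 *       LOG_ERR("task wdt init failed: %d", err);
 *   }
 */
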
int task_wdt_add(uint32_t reload_period, task_wdt_callback_t callback,
		 void *user_data)
{
	k_spinlock_key_t key;

	if (reload_period == 0) {
		return -EINVAL;
	}

	/*
	 * k_spin_lock instead of k_sched_lock is required here to avoid being
	 * interrupted by another task watchdog channel triggering (its handler
	 * is executed in ISR context).
	 */
	key = k_spin_lock(&channels_lock);

	/* look for unused channel (reload_period set to 0) */
	for (int id = 0; id < ARRAY_SIZE(channels); id++) {
		if (channels[id].reload_period == 0) {
			channels[id].reload_period = reload_period;
			channels[id].user_data = user_data;
			channels[id].timeout_abs_ticks = K_TICKS_FOREVER;
			channels[id].callback = callback;

#ifdef CONFIG_TASK_WDT_HW_FALLBACK
			if (!hw_wdt_started && hw_wdt_dev) {
				/* also start fallback hw wdt */
				wdt_setup(hw_wdt_dev,
					  WDT_OPT_PAUSE_HALTED_BY_DBG
#ifdef CONFIG_TASK_WDT_HW_FALLBACK_PAUSE_IN_SLEEP
					  | WDT_OPT_PAUSE_IN_SLEEP
#endif
					  );
				hw_wdt_started = true;
			}
#endif
			/* must be called after the hw wdt has been started */
			task_wdt_feed(id);

			k_spin_unlock(&channels_lock, key);

			return id;
		}
	}

	k_spin_unlock(&channels_lock, key);

	return -ENOMEM;
}

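/*
 * Example usage (a hedged sketch, not part of this module): registering a
 * channel with a callback that logs before rebooting. The callback runs in
 * ISR context via the timer expiry, so it must not block; the names and the
 * 1100 ms period below are assumptions for the example:
 *
 *   static void wdt_expired(int channel_id, void *user_data)
 *   {
 *       LOG_ERR("task wdt channel %d expired", channel_id);
 *       sys_reboot(SYS_REBOOT_COLD);
 *   }
 *
 *   int task_wdt_id = task_wdt_add(1100U, wdt_expired, NULL);
 *   if (task_wdt_id < 0) {
 *       // no free channel (-ENOMEM) or invalid period (-EINVAL)
 *   }
 */
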
int task_wdt_delete(int channel_id)
{
	k_spinlock_key_t key;

	if (channel_id < 0 || channel_id >= ARRAY_SIZE(channels)) {
		return -EINVAL;
	}

	key = k_spin_lock(&channels_lock);

	channels[channel_id].reload_period = 0;

	k_spin_unlock(&channels_lock, key);

	return 0;
}

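/*
 * Example usage (a minimal sketch, not part of this module): releasing a
 * channel once its supervised thread terminates, so the slot can be reused
 * by a later task_wdt_add() call:
 *
 *   task_wdt_delete(task_wdt_id);
 */
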
int task_wdt_feed(int channel_id)
{
	int64_t current_ticks;

	if (channel_id < 0 || channel_id >= ARRAY_SIZE(channels)) {
		return -EINVAL;
	}

	/*
	 * We need a critical section instead of a mutex while updating the
	 * channels array in order to prevent priority inversion. Otherwise,
	 * a low priority thread could be preempted before releasing the mutex
	 * and block a high priority thread that wants to feed its task wdt.
	 */
	k_sched_lock();

	current_ticks = sys_clock_tick_get();

	/* feed the specified channel */
	channels[channel_id].timeout_abs_ticks = current_ticks +
		k_ms_to_ticks_ceil64(channels[channel_id].reload_period);

	schedule_next_timeout(current_ticks);

	k_sched_unlock();

	return 0;
}

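/*
 * Example usage (a minimal sketch, not part of this module): a supervised
 * thread feeds its own channel each loop iteration, well within the reload
 * period. The 1000 ms period and 500 ms sleep are assumptions; with a NULL
 * callback, an expiry cold-reboots the system:
 *
 *   int id = task_wdt_add(1000U, NULL, NULL);
 *
 *   while (true) {
 *       task_wdt_feed(id);
 *       // ... actual thread work, bounded to less than the period ...
 *       k_sleep(K_MSEC(500));
 *   }
 */
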
void task_wdt_suspend(void)
{
	k_spinlock_key_t key;

	/*
	 * Allow the function to be called from a custom PM policy callback, even when
	 * the Task Watchdog was not initialized yet.
	 */
	if (!task_wdt_initialized) {
		return;
	}

	/*
	 * Prevent all task watchdog channels from triggering.
	 * Protect the timer access with the spinlock to avoid the timer being started
	 * concurrently by a call to schedule_next_timeout().
	 */
	key = k_spin_lock(&channels_lock);
	k_timer_stop(&timer);
	k_spin_unlock(&channels_lock, key);

#ifdef CONFIG_TASK_WDT_HW_FALLBACK
	/*
	 * Give a whole hardware watchdog timer period of time to the application to put
	 * the system in a suspend mode that will pause the hardware watchdog.
	 */
	if (hw_wdt_started) {
		wdt_feed(hw_wdt_dev, hw_wdt_channel);
	}
#endif
}

void task_wdt_resume(void)
{
	k_spinlock_key_t key;
	int64_t current_ticks;

	/*
	 * Allow the function to be called from a custom PM policy callback, even when
	 * the Task Watchdog was not initialized yet.
	 */
	if (!task_wdt_initialized) {
		return;
	}

	key = k_spin_lock(&channels_lock);

	/*
	 * Feed all enabled channels, so the application threads have time to resume
	 * feeding the channels by themselves.
	 */
	current_ticks = sys_clock_tick_get();
	for (size_t id = 0; id < ARRAY_SIZE(channels); id++) {
		if (channels[id].reload_period != 0) {
			channels[id].timeout_abs_ticks = current_ticks +
				k_ms_to_ticks_ceil64(channels[id].reload_period);
		}
	}

	/* Restart the Task Watchdog timer */
	schedule_next_timeout(current_ticks);

	k_spin_unlock(&channels_lock, key);
}
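
/*
 * Example usage (a hedged sketch, not part of this module): bracketing a
 * low-power state with the suspend/resume calls above, e.g. from custom PM
 * hooks. The hook names and the surrounding PM integration are assumptions
 * and depend entirely on the application:
 *
 *   void app_enter_low_power(void)
 *   {
 *       task_wdt_suspend();
 *       // ... enter a suspend mode that pauses the hardware watchdog ...
 *   }
 *
 *   void app_exit_low_power(void)
 *   {
 *       task_wdt_resume();
 *       // application threads now have one full reload period to start
 *       // feeding their channels again
 *   }
 */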