/*
 * Copyright (c) 2018 Intel Corporation.
 * Copyright (c) 2021 Nordic Semiconductor ASA.
 * Copyright (c) 2025 HubbleNetwork.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/sys/__assert.h>

#include <zephyr/logging/log.h>
LOG_MODULE_DECLARE(pm_device, CONFIG_PM_DEVICE_LOG_LEVEL);

#ifdef CONFIG_PM_DEVICE_POWER_DOMAIN
#define PM_DOMAIN(_pm) \
	(_pm)->domain
#else
#define PM_DOMAIN(_pm) NULL
#endif

#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
#ifdef CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ
K_THREAD_STACK_DEFINE(pm_device_runtime_stack, CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_STACK_SIZE);
static struct k_work_q pm_device_runtime_wq;
#endif /* CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ */
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */

#define EVENT_STATE_ACTIVE	BIT(PM_DEVICE_STATE_ACTIVE)
#define EVENT_STATE_SUSPENDED	BIT(PM_DEVICE_STATE_SUSPENDED)

#define EVENT_MASK		(EVENT_STATE_ACTIVE | EVENT_STATE_SUSPENDED)

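/*
 * Typical driver-side usage of the runtime PM API implemented in this file
 * (illustrative sketch only; "my_transfer" and its bus access are
 * hypothetical, not part of this subsystem):
 *
 *	static int my_transfer(const struct device *dev)
 *	{
 *		int ret = pm_device_runtime_get(dev);	resume device and its domain
 *
 *		if (ret < 0) {
 *			return ret;
 *		}
 *
 *		... access the now-active hardware ...
 *
 *		return pm_device_runtime_put(dev);	allow suspend once usage drops to 0
 *	}
 */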
/**
 * @brief Suspend a device
 *
 * @note Asynchronous operations are not supported when in pre-kernel mode. In
 * this case, the async flag will always be forced to false, and so the
 * function will be blocking.
 *
 * @funcprops \pre_kernel_ok
 *
 * @param dev Device instance.
 * @param async Perform operation asynchronously.
 * @param delay Period to delay the asynchronous operation.
 *
 * @retval 0 If device has been suspended or queued for suspend.
 * @retval -EALREADY If device is already suspended (can only happen if get/put
 * calls are unbalanced).
 * @retval -EBUSY If the device is busy.
 * @retval -errno Other negative errno, result of the action callback.
 */
static int runtime_suspend(const struct device *dev, bool async,
			   k_timeout_t delay)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	/*
	 * Early return if device runtime is not enabled.
	 */
	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		return 0;
	}

	if (k_is_pre_kernel()) {
		async = false;
	} else {
		ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
		if (ret < 0) {
			return -EBUSY;
		}
	}

	if (pm->base.usage == 0U) {
		LOG_WRN("Unbalanced suspend");
		ret = -EALREADY;
		goto unlock;
	}

	pm->base.usage--;
	if (pm->base.usage > 0U) {
		goto unlock;
	}

	if (async) {
		/* queue suspend */
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
		pm->base.state = PM_DEVICE_STATE_SUSPENDING;
#ifdef CONFIG_PM_DEVICE_RUNTIME_USE_SYSTEM_WQ
		(void)k_work_schedule(&pm->work, delay);
#else
		(void)k_work_schedule_for_queue(&pm_device_runtime_wq, &pm->work, delay);
#endif /* CONFIG_PM_DEVICE_RUNTIME_USE_SYSTEM_WQ */
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
	} else {
		/* suspend now */
		ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			pm->base.usage++;
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_SUSPENDED;

		/* Now put the domain */
		if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_PD_CLAIMED)) {
			(void)pm_device_runtime_put(PM_DOMAIN(dev->pm_base));
			atomic_clear_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_PD_CLAIMED);
		}
	}

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

	return ret;
}

#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
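/*
 * Delayable work handler used for asynchronous suspend requests: it runs the
 * SUSPEND action, restores the usage count and ACTIVE state if the action
 * fails, and signals waiters through the state event.
 */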
static void runtime_suspend_work(struct k_work *work)
{
	int ret;
	struct k_work_delayable *dwork = k_work_delayable_from_work(work);
	struct pm_device *pm = CONTAINER_OF(dwork, struct pm_device, work);

	ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);

	(void)k_sem_take(&pm->lock, K_FOREVER);
	if (ret < 0) {
		pm->base.usage++;
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	}
	k_event_set(&pm->event, BIT(pm->base.state));
	k_sem_give(&pm->lock);

	/*
	 * On async put, we have to suspend the domain when the device
	 * finishes its operation
	 */
	if ((ret == 0) &&
	    atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED)) {
		(void)pm_device_runtime_put(PM_DOMAIN(&pm->base));
		atomic_clear_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED);
	}

	__ASSERT(ret == 0, "Could not suspend device (%d)", ret);
}
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */

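/*
 * Resume an ISR-safe device with its spinlock already held. On the first get
 * (usage count zero) the claimed power domain, if any, is resumed first; this
 * only works when the domain is ISR-safe too, otherwise -EWOULDBLOCK is
 * returned. The usage count is incremented on success.
 */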
static int get_sync_locked(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	uint32_t flags = pm->base.flags;

	if (pm->base.usage == 0) {
		if (flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) {
			const struct device *domain = PM_DOMAIN(&pm->base);

			if (domain->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE)) {
				ret = pm_device_runtime_get(domain);
				if (ret < 0) {
					return ret;
				}
			} else {
				return -EWOULDBLOCK;
			}
		}

		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			return ret;
		}
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		ret = 0;
	}

	pm->base.usage++;

	return ret;
}

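/*
 * Resume a device (claiming its power domain first when needed) and take a
 * usage reference; see <zephyr/pm/device_runtime.h> for the full API
 * documentation.
 */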
int pm_device_runtime_get(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	if (pm == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_get, dev);

	/*
	 * Early return if device runtime is not enabled.
	 */
	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		return 0;
	}

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		struct pm_device_isr *pm_sync = dev->pm_isr;
		k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);

		ret = get_sync_locked(dev);
		k_spin_unlock(&pm_sync->lock, k);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		ret = k_sem_take(&pm->lock, k_is_in_isr() ? K_NO_WAIT : K_FOREVER);
		if (ret < 0) {
			return -EWOULDBLOCK;
		}
	}

	if (k_is_in_isr() && (pm->base.state == PM_DEVICE_STATE_SUSPENDING)) {
		ret = -EWOULDBLOCK;
		goto unlock;
	}

	/*
	 * If the device is under a power domain, the domain has to be
	 * claimed first.
	 */
	const struct device *domain = PM_DOMAIN(&pm->base);

	if (domain != NULL && !atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_PD_CLAIMED)) {
		ret = pm_device_runtime_get(domain);
		if (ret != 0) {
			goto unlock;
		}
		/* Check if powering up this device failed */
		if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_TURN_ON_FAILED)) {
			(void)pm_device_runtime_put(domain);
			ret = -EAGAIN;
			goto unlock;
		}
		/* Power domain successfully claimed */
		atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_PD_CLAIMED);
	}

	pm->base.usage++;

#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
	/*
	 * Check if the device has a pending suspend operation (not started
	 * yet) and cancel it. This way we avoid unnecessary operations because
	 * the device is actually active.
	 */
	if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) &&
	    ((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) {
		pm->base.state = PM_DEVICE_STATE_ACTIVE;
		goto unlock;
	}

	if (!k_is_pre_kernel()) {
		/*
		 * If the device is already suspending there is
		 * nothing else we can do but wait until it finishes.
		 */
		while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) {
			k_event_clear(&pm->event, EVENT_MASK);
			k_sem_give(&pm->lock);

			k_event_wait(&pm->event, EVENT_MASK, false, K_FOREVER);

			(void)k_sem_take(&pm->lock, K_FOREVER);
		}
	}
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */

	if (pm->base.usage > 1U) {
		goto unlock;
	}

	ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_RESUME);
	if (ret < 0) {
		pm->base.usage--;
		if (domain != NULL) {
			(void)pm_device_runtime_put(domain);
		}
		goto unlock;
	}

	pm->base.state = PM_DEVICE_STATE_ACTIVE;

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_get, dev, ret);

	return ret;
}

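/*
 * Release an ISR-safe device with its spinlock already held. When the usage
 * count drops to zero the device is suspended, and its claimed power domain,
 * if any, is released too (only possible when the domain is ISR-safe).
 */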
static int put_sync_locked(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	uint32_t flags = pm->base.flags;

	if (!(flags & BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED))) {
		return 0;
	}

	if (pm->base.usage == 0U) {
		return -EALREADY;
	}

	pm->base.usage--;
	if (pm->base.usage == 0U) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			return ret;
		}
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;

		if (flags & BIT(PM_DEVICE_FLAG_PD_CLAIMED)) {
			const struct device *domain = PM_DOMAIN(&pm->base);

			if (domain->pm_base->flags & BIT(PM_DEVICE_FLAG_ISR_SAFE)) {
				ret = put_sync_locked(domain);
			} else {
				ret = -EWOULDBLOCK;
			}
		}
	} else {
		ret = 0;
	}

	return ret;
}

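/*
 * Release a device reference synchronously; see <zephyr/pm/device_runtime.h>
 * for the full API documentation.
 */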
int pm_device_runtime_put(const struct device *dev)
{
	int ret;

	if (dev->pm_base == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put, dev);

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		struct pm_device_isr *pm_sync = dev->pm_isr;
		k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);

		ret = put_sync_locked(dev);

		k_spin_unlock(&pm_sync->lock, k);
	} else {
		ret = runtime_suspend(dev, false, K_NO_WAIT);
	}
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put, dev, ret);

	return ret;
}

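/*
 * Release a device reference asynchronously after an optional delay. ISR-safe
 * devices fall back to the synchronous path, and -ENOSYS is returned when
 * CONFIG_PM_DEVICE_RUNTIME_ASYNC is disabled. Full API documentation in
 * <zephyr/pm/device_runtime.h>.
 */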
int pm_device_runtime_put_async(const struct device *dev, k_timeout_t delay)
{
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
	int ret;

	if (dev->pm_base == NULL) {
		return 0;
	}

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_put_async, dev, delay);
	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		struct pm_device_isr *pm_sync = dev->pm_isr;
		k_spinlock_key_t k = k_spin_lock(&pm_sync->lock);

		ret = put_sync_locked(dev);

		k_spin_unlock(&pm_sync->lock, k);
	} else {
		ret = runtime_suspend(dev, true, delay);
	}
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_put_async, dev, delay, ret);

	return ret;
#else
	LOG_WRN("Function not available");
	return -ENOSYS;
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
}

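/*
 * Called during boot to enable runtime PM on devices flagged with
 * PM_DEVICE_FLAG_RUNTIME_AUTO.
 */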
__boot_func
int pm_device_runtime_auto_enable(const struct device *dev)
{
	struct pm_device_base *pm = dev->pm_base;

	/* No action needed if PM_DEVICE_FLAG_RUNTIME_AUTO is not enabled */
	if (!pm || !atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_AUTO)) {
		return 0;
	}
	return pm_device_runtime_enable(dev);
}

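/*
 * Enable runtime PM on an ISR-safe device: suspend it if it is currently
 * active, reset the usage count and set the RUNTIME_ENABLED flag, all under
 * the device spinlock.
 */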
static int runtime_enable_sync(const struct device *dev)
{
	int ret;
	struct pm_device_isr *pm = dev->pm_isr;
	k_spinlock_key_t k = k_spin_lock(&pm->lock);

	if (pm->base.state == PM_DEVICE_STATE_ACTIVE) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	} else {
		ret = 0;
	}

	pm->base.flags |= BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED);
	pm->base.usage = 0U;
unlock:
	k_spin_unlock(&pm->lock, k);
	return ret;
}

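/*
 * Enable runtime PM on a device: the device is suspended if currently active
 * and its usage count is reset to zero. Full API documentation in
 * <zephyr/pm/device_runtime.h>.
 */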
int pm_device_runtime_enable(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_enable, dev);

	if (pm == NULL) {
		ret = -ENOTSUP;
		goto end;
	}

	if (atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		goto end;
	}

	if (pm_device_is_busy(dev)) {
		ret = -EBUSY;
		goto end;
	}

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		ret = runtime_enable_sync(dev);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		(void)k_sem_take(&pm->lock, K_FOREVER);
	}

	/* lazy init of PM fields */
	if (pm->dev == NULL) {
		pm->dev = dev;
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
		k_work_init_delayable(&pm->work, runtime_suspend_work);
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */
	}

	if (pm->base.state == PM_DEVICE_STATE_ACTIVE) {
		ret = pm->base.action_cb(pm->dev, PM_DEVICE_ACTION_SUSPEND);
		if (ret < 0) {
			goto unlock;
		}
		pm->base.state = PM_DEVICE_STATE_SUSPENDED;
	}

	pm->base.usage = 0U;

	atomic_set_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_enable, dev, ret);
	return ret;
}

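/*
 * Disable runtime PM on an ISR-safe device: resume it if it is currently
 * suspended and clear the RUNTIME_ENABLED flag, all under the device
 * spinlock.
 */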
static int runtime_disable_sync(const struct device *dev)
{
	struct pm_device_isr *pm = dev->pm_isr;
	int ret;
	k_spinlock_key_t k = k_spin_lock(&pm->lock);

	if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	} else {
		ret = 0;
	}

	pm->base.flags &= ~BIT(PM_DEVICE_FLAG_RUNTIME_ENABLED);
unlock:
	k_spin_unlock(&pm->lock, k);
	return ret;
}

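/*
 * Disable runtime PM on a device: any pending or in-progress asynchronous
 * suspend is cancelled or waited for, the device is resumed if suspended, and
 * the RUNTIME_ENABLED flag is cleared. Full API documentation in
 * <zephyr/pm/device_runtime.h>.
 */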
int pm_device_runtime_disable(const struct device *dev)
{
	int ret = 0;
	struct pm_device *pm = dev->pm;

	SYS_PORT_TRACING_FUNC_ENTER(pm, device_runtime_disable, dev);

	if (pm == NULL) {
		ret = -ENOTSUP;
		goto end;
	}

	if (!atomic_test_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED)) {
		goto end;
	}

	if (atomic_test_bit(&dev->pm_base->flags, PM_DEVICE_FLAG_ISR_SAFE)) {
		ret = runtime_disable_sync(dev);
		goto end;
	}

	if (!k_is_pre_kernel()) {
		(void)k_sem_take(&pm->lock, K_FOREVER);
	}

#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
	if (!k_is_pre_kernel()) {
		if ((pm->base.state == PM_DEVICE_STATE_SUSPENDING) &&
		    ((k_work_cancel_delayable(&pm->work) & K_WORK_RUNNING) == 0)) {
			pm->base.state = PM_DEVICE_STATE_ACTIVE;
			goto clear_bit;
		}

		/* wait until possible async suspend is completed */
		while (pm->base.state == PM_DEVICE_STATE_SUSPENDING) {
			k_event_clear(&pm->event, EVENT_MASK);
			k_sem_give(&pm->lock);

			k_event_wait(&pm->event, EVENT_MASK, false, K_FOREVER);

			(void)k_sem_take(&pm->lock, K_FOREVER);
		}
	}
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */

	/* wake up the device if suspended */
	if (pm->base.state == PM_DEVICE_STATE_SUSPENDED) {
		ret = pm->base.action_cb(dev, PM_DEVICE_ACTION_RESUME);
		if (ret < 0) {
			goto unlock;
		}

		pm->base.state = PM_DEVICE_STATE_ACTIVE;
	}
#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
clear_bit:
#endif
	atomic_clear_bit(&pm->base.flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);

unlock:
	if (!k_is_pre_kernel()) {
		k_sem_give(&pm->lock);
	}

end:
	SYS_PORT_TRACING_FUNC_EXIT(pm, device_runtime_disable, dev, ret);

	return ret;
}

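/* Return true when runtime PM has been enabled on the device. */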
bool pm_device_runtime_is_enabled(const struct device *dev)
{
	struct pm_device_base *pm = dev->pm_base;

	return pm && atomic_test_bit(&pm->flags, PM_DEVICE_FLAG_RUNTIME_ENABLED);
}

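/*
 * Return the current usage (reference) count of the device, or -ENOTSUP when
 * runtime PM is not enabled on it.
 */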
int pm_device_runtime_usage(const struct device *dev)
{
	if (!pm_device_runtime_is_enabled(dev)) {
		return -ENOTSUP;
	}

	return dev->pm_base->usage;
}

#ifdef CONFIG_PM_DEVICE_RUNTIME_ASYNC
#ifdef CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ

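/*
 * Start the dedicated work queue used for asynchronous suspend operations
 * when CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ is selected.
 */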
static int pm_device_runtime_wq_init(void)
{
	const struct k_work_queue_config cfg = {.name = "PM DEVICE RUNTIME WQ"};

	k_work_queue_init(&pm_device_runtime_wq);

	k_work_queue_start(&pm_device_runtime_wq, pm_device_runtime_stack,
			   K_THREAD_STACK_SIZEOF(pm_device_runtime_stack),
			   CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_PRIO, &cfg);

	return 0;
}

SYS_INIT(pm_device_runtime_wq_init, POST_KERNEL,
	 CONFIG_PM_DEVICE_RUNTIME_DEDICATED_WQ_INIT_PRIO);

#endif /* CONFIG_PM_DEVICE_RUNTIME_USE_DEDICATED_WQ */
#endif /* CONFIG_PM_DEVICE_RUNTIME_ASYNC */