/*
 * Copyright (c) 2018 Intel Corporation.
 * Copyright (c) 2022 Nordic Semiconductor ASA
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/pm/policy.h>
#include <zephyr/pm/state.h>
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/toolchain.h>
#include <zephyr/spinlock.h>

#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)

#define DT_SUB_LOCK_INIT(node_id)					\
	{ .state = PM_STATE_DT_INIT(node_id),				\
	  .substate_id = DT_PROP_OR(node_id, substate_id, 0),		\
	  .exit_latency_us = DT_PROP_OR(node_id, exit_latency_us, 0),	\
	},

/**
 * State and substate lock structure.
 *
 * This array holds every power state defined in the devicetree. A separate
 * array of counters (lock_cnt) lives in RAM, where the n-th counter tracks
 * the lock count of the n-th power state. Each entry also stores the state's
 * exit latency, which is used to disable power states that cannot meet the
 * current latency requirement.
 *
 * Operations on this array are O(n) in the number of power states, mostly
 * because of the arbitrary nature of the substate value (which can be
 * anything from a small integer to a bitmask). A hashmap could probably do
 * better.
 */
static const struct {
	enum pm_state state;
	uint8_t substate_id;
	uint32_t exit_latency_us;
} substates[] = {
	DT_FOREACH_STATUS_OKAY(zephyr_power_state, DT_SUB_LOCK_INIT)
};
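
/*
 * Illustrative only: a devicetree node of the form below would add one entry
 * to the substates[] array above (node name and property values are examples,
 * not taken from this file):
 *
 *	state0: state0 {
 *		compatible = "zephyr,power-state";
 *		power-state-name = "suspend-to-idle";
 *		substate-id = <1>;
 *		exit-latency-us = <100>;
 *	};
 */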
static atomic_t lock_cnt[ARRAY_SIZE(substates)];
static atomic_t latency_mask = BIT_MASK(ARRAY_SIZE(substates));
static atomic_t unlock_mask = BIT_MASK(ARRAY_SIZE(substates));
static struct k_spinlock lock;

#endif

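/*
 * Increment the lock counter of every entry matching the given state and
 * substate (PM_ALL_SUBSTATES matches any substate). While a counter is
 * non-zero the corresponding state is reported as locked. Calls must be
 * balanced with pm_policy_state_lock_put(), e.g. (illustrative):
 *
 *	pm_policy_state_lock_get(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
 *	...critical section that must not enter standby...
 *	pm_policy_state_lock_put(PM_STATE_STANDBY, PM_ALL_SUBSTATES);
 */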
void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	for (size_t i = 0; i < ARRAY_SIZE(substates); i++) {
		if (substates[i].state == state &&
		   (substates[i].substate_id == substate_id || substate_id == PM_ALL_SUBSTATES)) {
			k_spinlock_key_t key = k_spin_lock(&lock);

			if (lock_cnt[i] == 0) {
				unlock_mask &= ~BIT(i);
			}
			lock_cnt[i]++;
			k_spin_unlock(&lock, key);
		}
	}
#endif
}

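/*
 * Decrement the lock counter of every matching entry; asserts if a counter
 * would underflow (unbalanced get/put). Once a counter reaches zero again the
 * state is marked as unlocked in unlock_mask.
 */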
void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	for (size_t i = 0; i < ARRAY_SIZE(substates); i++) {
		if (substates[i].state == state &&
		   (substates[i].substate_id == substate_id || substate_id == PM_ALL_SUBSTATES)) {
			k_spinlock_key_t key = k_spin_lock(&lock);

			__ASSERT(lock_cnt[i] > 0, "Unbalanced state lock get/put");
			lock_cnt[i]--;
			if (lock_cnt[i] == 0) {
				unlock_mask |= BIT(i);
			}
			k_spin_unlock(&lock, key);
		}
	}
#endif
}

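/*
 * Return true if at least one lock is currently held for the given state and
 * substate pair; always false when no zephyr,power-state nodes are defined.
 */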
bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	for (size_t i = 0; i < ARRAY_SIZE(substates); i++) {
		if (substates[i].state == state &&
		   (substates[i].substate_id == substate_id || substate_id == PM_ALL_SUBSTATES)) {
			return atomic_get(&lock_cnt[i]) != 0;
		}
	}
#endif

	return false;
}

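/*
 * A state is available when it is not locked and its exit latency satisfies
 * the current latency requirement (i.e. its bit is still set in latency_mask).
 */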
bool pm_policy_state_is_available(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	for (size_t i = 0; i < ARRAY_SIZE(substates); i++) {
		if (substates[i].state == state &&
		   (substates[i].substate_id == substate_id || substate_id == PM_ALL_SUBSTATES)) {
			return (atomic_get(&lock_cnt[i]) == 0) &&
			       (atomic_get(&latency_mask) & BIT(i));
		}
	}
#endif

	return false;
}

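/*
 * With no zephyr,power-state nodes there is nothing to track, so every state
 * is considered usable and true is returned unconditionally.
 */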
bool pm_policy_state_any_active(void)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	/* Check if there is any power state that is not locked and not disabled due
	 * to latency requirements.
	 */
	return atomic_get(&unlock_mask) & atomic_get(&latency_mask);
#endif
	return true;
}

#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
/* Callback invoked whenever the latency requirement changes. It runs with the lock held. */
static void pm_policy_latency_update_locked(int32_t max_latency_us)
{
	for (size_t i = 0; i < ARRAY_SIZE(substates); i++) {
		if (substates[i].exit_latency_us >= max_latency_us) {
			latency_mask &= ~BIT(i);
		} else {
			latency_mask |= BIT(i);
		}
	}
}

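/*
 * Register the latency-change callback early (PRE_KERNEL_1, priority 0) so
 * that latency requirements set later by drivers or applications are
 * reflected in latency_mask via pm_policy_latency_update_locked().
 */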
static int pm_policy_latency_init(void)
{
	static struct pm_policy_latency_subscription sub;

	pm_policy_latency_changed_subscribe(&sub, pm_policy_latency_update_locked);

	return 0;
}

SYS_INIT(pm_policy_latency_init, PRE_KERNEL_1, 0);
#endif