/*
 * Copyright (c) 2018 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#include <zephyr/device.h>
#include <zephyr/kernel.h>
#include <zephyr/kernel_structs.h>
#include <zephyr/init.h>
#include <string.h>
#include <zephyr/drivers/timer/system_timer.h>
#include <zephyr/pm/device.h>
#include <zephyr/pm/device_runtime.h>
#include <zephyr/pm/pm.h>
#include <zephyr/pm/state.h>
#include <zephyr/pm/policy.h>
#include <zephyr/tracing/tracing.h>

#include "pm_stats.h"
#include "device_system_managed.h"

#include <zephyr/logging/log.h>
LOG_MODULE_REGISTER(pm, CONFIG_PM_LOG_LEVEL);

static ATOMIC_DEFINE(z_post_ops_required, CONFIG_MP_MAX_NUM_CPUS);
static sys_slist_t pm_notifiers = SYS_SLIST_STATIC_INIT(&pm_notifiers);

/* Convert exit-latency-us to ticks using specified method. */
#define EXIT_LATENCY_US_TO_TICKS(us)						     \
	IS_ENABLED(CONFIG_PM_PREWAKEUP_CONV_MODE_NEAR) ? k_us_to_ticks_near32(us) : \
	IS_ENABLED(CONFIG_PM_PREWAKEUP_CONV_MODE_CEIL) ? k_us_to_ticks_ceil32(us) : \
		k_us_to_ticks_floor32(us)

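/*
 * Illustrative expansion (a sketch, not normative): assuming
 * CONFIG_PM_PREWAKEUP_CONV_MODE_CEIL=y and CONFIG_SYS_CLOCK_TICKS_PER_SEC=32768,
 * EXIT_LATENCY_US_TO_TICKS(100) selects k_us_to_ticks_ceil32(100), i.e.
 * ceil(100 * 32768 / 1000000) = 4 ticks.
 */
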
/* State pointers which are set to NULL indicate ACTIVE state. */
static const struct pm_state_info *z_cpus_pm_state[CONFIG_MP_MAX_NUM_CPUS];
static const struct pm_state_info *z_cpus_pm_forced_state[CONFIG_MP_MAX_NUM_CPUS];

static struct k_spinlock pm_forced_state_lock;
static struct k_spinlock pm_notifier_lock;

/*
 * Function called to notify when the system is entering / exiting a
 * power state
 */
static inline void pm_state_notify(bool entering_state)
{
	struct pm_notifier *notifier;
	k_spinlock_key_t pm_notifier_key;
	void (*callback)(enum pm_state state);

	pm_notifier_key = k_spin_lock(&pm_notifier_lock);
	SYS_SLIST_FOR_EACH_CONTAINER(&pm_notifiers, notifier, _node) {
		if (entering_state) {
			callback = notifier->state_entry;
		} else {
			callback = notifier->state_exit;
		}

		if (callback) {
			callback(z_cpus_pm_state[CPU_ID]->state);
		}
	}
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
}

static inline int32_t ticks_expiring_sooner(int32_t ticks1, int32_t ticks2)
{
	/*
	 * Ticks are relative values that define the number of ticks
	 * until the next event.
	 * The maximum value is K_TICKS_FOREVER ((uint32_t)-1), which is -1
	 * when cast to (int32_t).
	 * We need to find out which event expires sooner.
	 */

	__ASSERT(ticks1 >= -1, "ticks1 has unexpected negative value");
	__ASSERT(ticks2 >= -1, "ticks2 has unexpected negative value");

	if (ticks1 == K_TICKS_FOREVER) {
		return ticks2;
	}
	if (ticks2 == K_TICKS_FOREVER) {
		return ticks1;
	}
	/* At this point both ticks1 and ticks2 are non-negative */
	return MIN(ticks1, ticks2);
}
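
/*
 * For example (illustrative): ticks_expiring_sooner(K_TICKS_FOREVER, 10)
 * above returns 10, while ticks_expiring_sooner(5, 10) returns 5.
 */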

void pm_system_resume(void)
{
	uint8_t id = CPU_ID;

	/*
	 * This notification is called from the ISR of the event
	 * that caused exit from kernel idling after PM operations.
	 *
	 * Some CPU low power states require enabling of interrupts
	 * atomically when entering those states. The wake up from
	 * such a state first executes code in the ISR of the interrupt
	 * that caused the wake. This hook will be called from the ISR.
	 * For such CPU LPS states, do post operations and restores here.
	 * The kernel scheduler will get control after the ISR finishes
	 * and it may schedule another thread.
	 */
	if (atomic_test_and_clear_bit(z_post_ops_required, id)) {
#ifdef CONFIG_PM_DEVICE_SYSTEM_MANAGED
		if (atomic_add(&_cpus_active, 1) == 0) {
			if ((z_cpus_pm_state[id]->state != PM_STATE_RUNTIME_IDLE) &&
					!z_cpus_pm_state[id]->pm_device_disabled) {
				pm_resume_devices();
			}
		}
#endif
		pm_state_exit_post_ops(z_cpus_pm_state[id]->state,
				       z_cpus_pm_state[id]->substate_id);
		pm_state_notify(false);
#ifdef CONFIG_SYS_CLOCK_EXISTS
		sys_clock_idle_exit();
#endif /* CONFIG_SYS_CLOCK_EXISTS */
		z_cpus_pm_state[id] = NULL;
	}
}

bool pm_state_force(uint8_t cpu, const struct pm_state_info *info)
{
	k_spinlock_key_t key;

	__ASSERT(info->state < PM_STATE_COUNT,
		 "Invalid power state %d!", info->state);

	info = pm_state_get(cpu, info->state, info->substate_id);
	if (info == NULL) {
		/* Return false if the state could not be retrieved */
		return false;
	}

	key = k_spin_lock(&pm_forced_state_lock);
	z_cpus_pm_forced_state[cpu] = info;
	k_spin_unlock(&pm_forced_state_lock, key);

	return true;
}
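
/*
 * Illustrative use of pm_state_force() (a sketch, not part of this file;
 * PM_STATE_SOFT_OFF is only an example and must be a state actually defined
 * for the target in devicetree):
 *
 *	static const struct pm_state_info soft_off = {
 *		.state = PM_STATE_SOFT_OFF,
 *	};
 *
 *	if (pm_state_force(0U, &soft_off)) {
 *		// The forced state is picked on the next idle entry for CPU 0.
 *	}
 */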

bool pm_system_suspend(int32_t kernel_ticks)
{
	uint8_t id = CPU_ID;
	k_spinlock_key_t key;
	int32_t ticks, events_ticks;
	uint32_t exit_latency_ticks;

	SYS_PORT_TRACING_FUNC_ENTER(pm, system_suspend, kernel_ticks);

	if (!pm_policy_state_any_active() && (z_cpus_pm_forced_state[id] == NULL)) {
		/* Return early if all states are unavailable. */
		return false;
	}

	/*
	 * The CPU needs to be fully woken up before the event is triggered.
	 * We first need to find out the number of ticks until the next event.
	 */
	events_ticks = pm_policy_next_event_ticks();
	ticks = ticks_expiring_sooner(kernel_ticks, events_ticks);

	key = k_spin_lock(&pm_forced_state_lock);
	if (z_cpus_pm_forced_state[id] != NULL) {
		z_cpus_pm_state[id] = z_cpus_pm_forced_state[id];
		z_cpus_pm_forced_state[id] = NULL;
	} else {
		z_cpus_pm_state[id] = pm_policy_next_state(id, ticks);
	}
	k_spin_unlock(&pm_forced_state_lock, key);

	if (z_cpus_pm_state[id] == NULL) {
		LOG_DBG("No PM operations done.");
		SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks, PM_STATE_ACTIVE);
		return false;
	}

#ifdef CONFIG_PM_DEVICE_SYSTEM_MANAGED
	if (atomic_sub(&_cpus_active, 1) == 1) {
		if ((z_cpus_pm_state[id]->state != PM_STATE_RUNTIME_IDLE) &&
		    !z_cpus_pm_state[id]->pm_device_disabled) {
			if (!pm_suspend_devices()) {
				pm_resume_devices();
				z_cpus_pm_state[id] = NULL;
				(void)atomic_add(&_cpus_active, 1);
				SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks,
							   PM_STATE_ACTIVE);
				return false;
			}
		}
	}
#endif

	exit_latency_ticks = EXIT_LATENCY_US_TO_TICKS(z_cpus_pm_state[id]->exit_latency_us);
	if ((exit_latency_ticks > 0) && (ticks != K_TICKS_FOREVER)) {
		/*
		 * We need to set the timer to interrupt a little bit early to
		 * accommodate the time required by the CPU to fully wake up.
		 *
		 * Since K_TICKS_FOREVER is defined as -1, ensure that -1
		 * is not passed as the next timeout.
		 */
		sys_clock_set_timeout(MAX(0, ticks - exit_latency_ticks), true);
	}

	/*
	 * This function runs with interrupts locked, but the SoC is
	 * expected to unlock them in pm_state_exit_post_ops() when
	 * returning to the active state. We don't want to be scheduled
	 * out yet: first we need to send the notification about leaving
	 * the idle state. So we lock the scheduler here and unlock it
	 * just after the notification has been sent in pm_system_resume().
	 */
	k_sched_lock();

	if (IS_ENABLED(CONFIG_PM_STATS)) {
		pm_stats_start();
	}
	/* Enter power state */
	pm_state_notify(true);
	atomic_set_bit(z_post_ops_required, id);
	pm_state_set(z_cpus_pm_state[id]->state, z_cpus_pm_state[id]->substate_id);

	/* Wake up sequence starts here */

	if (IS_ENABLED(CONFIG_PM_STATS)) {
		pm_stats_stop();
		pm_stats_update(z_cpus_pm_state[id] ?
				z_cpus_pm_state[id]->state : PM_STATE_ACTIVE);
	}

	pm_system_resume();
	k_sched_unlock();
	SYS_PORT_TRACING_FUNC_EXIT(pm, system_suspend, ticks,
				   z_cpus_pm_state[id] ?
				   z_cpus_pm_state[id]->state : PM_STATE_ACTIVE);

	return true;
}
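
/*
 * A hedged sketch of the typical call pattern (not part of this file):
 * pm_system_suspend() is invoked from the idle path with the number of
 * ticks until the next kernel timeout; next_timeout_ticks below is a
 * placeholder name.
 *
 *	int32_t next_timeout_ticks = ...;
 *
 *	if (!pm_system_suspend(next_timeout_ticks)) {
 *		// No low-power state was entered; fall back to plain CPU idle.
 *		k_cpu_idle();
 *	}
 */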

void pm_notifier_register(struct pm_notifier *notifier)
{
	k_spinlock_key_t pm_notifier_key = k_spin_lock(&pm_notifier_lock);

	sys_slist_append(&pm_notifiers, &notifier->_node);
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);
}

int pm_notifier_unregister(struct pm_notifier *notifier)
{
	int ret = -EINVAL;
	k_spinlock_key_t pm_notifier_key;

	pm_notifier_key = k_spin_lock(&pm_notifier_lock);
	if (sys_slist_find_and_remove(&pm_notifiers, &(notifier->_node))) {
		ret = 0;
	}
	k_spin_unlock(&pm_notifier_lock, pm_notifier_key);

	return ret;
}
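
/*
 * Illustrative notifier usage (a sketch; the callback bodies are
 * placeholders): define entry/exit callbacks, register the notifier,
 * and unregister it when no longer needed.
 *
 *	static void on_pm_entry(enum pm_state state)
 *	{
 *		// Quiesce application activity before the state is entered.
 *	}
 *
 *	static void on_pm_exit(enum pm_state state)
 *	{
 *		// Restore application state after wake-up.
 *	}
 *
 *	static struct pm_notifier app_notifier = {
 *		.state_entry = on_pm_entry,
 *		.state_exit = on_pm_exit,
 *	};
 *
 *	pm_notifier_register(&app_notifier);
 *
 *	// Later, when notifications are no longer needed:
 *	(void)pm_notifier_unregister(&app_notifier);
 */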

const struct pm_state_info *pm_state_next_get(uint8_t cpu)
{
	static const struct pm_state_info active = {
		.state = PM_STATE_ACTIVE
	};

	return z_cpus_pm_state[cpu] ? z_cpus_pm_state[cpu] : &active;
}