1 // © 2021 Qualcomm Innovation Center, Inc. All rights reserved.
2 //
3 // SPDX-License-Identifier: BSD-3-Clause
4 
5 #include <assert.h>
6 #include <hyptypes.h>
7 
8 #include <hypcontainers.h>
9 
10 #include <atomic.h>
11 #include <bitmap.h>
12 #include <cpulocal.h>
13 #include <ipi.h>
14 #include <panic.h>
15 #include <platform_cpu.h>
16 #include <power.h>
17 #include <preempt.h>
18 #include <rcu.h>
19 #include <scheduler.h>
20 #include <spinlock.h>
21 #include <timer_queue.h>
22 #include <util.h>
23 
24 #include <events/power.h>
25 
26 #include "event_handlers.h"
27 
// Delay between retries of platform_cpu_on() when it races with a CPU
// power-off; converted from POWER_CPU_ON_RETRY_DELAY_NS at cold boot.
static ticks_t power_cpu_on_retry_delay_ticks;

static spinlock_t power_system_lock;
// Bitmap of CPUs currently considered online. It is cleared on suspend /
// offline and set again on resume, so an empty bitmap marks the
// last-CPU-suspend and first-CPU-resume transitions of the whole system.
static BITMAP_DECLARE(PLATFORM_MAX_CORES, power_system_online_cpus)
	PROTECTED_BY(power_system_lock);
// The platform state requested by the most recent whole-system suspend;
// replayed to the system-resume event when the first CPU wakes.
static platform_power_state_t
	power_system_suspend_state PROTECTED_BY(power_system_lock);

// Per-CPU power-on vote counts and cpu_on retry bookkeeping.
CPULOCAL_DECLARE_STATIC(power_voting_t, power_voting);

// This is protected by the lock in the corresponding power_voting_t structure,
// but must remain a separate array because it is exposed in crash minidumps.
CPULOCAL_DECLARE_STATIC(cpu_power_state_t, power_state);
41 
42 const cpu_power_state_array_t *
power_get_cpu_states_for_debug(void)43 power_get_cpu_states_for_debug(void)
44 {
45 	return &cpulocal_power_state;
46 }
47 
48 void
power_handle_boot_cold_init(cpu_index_t boot_cpu)49 power_handle_boot_cold_init(cpu_index_t boot_cpu)
50 {
51 	power_cpu_on_retry_delay_ticks =
52 		timer_convert_ns_to_ticks(POWER_CPU_ON_RETRY_DELAY_NS);
53 	assert(power_cpu_on_retry_delay_ticks != 0U);
54 
55 	for (cpu_index_t cpu = 0U; cpu < PLATFORM_MAX_CORES; cpu++) {
56 		spinlock_init(&CPULOCAL_BY_INDEX(power_voting, cpu).lock);
57 		spinlock_acquire_nopreempt(
58 			&CPULOCAL_BY_INDEX(power_voting, cpu).lock);
59 
60 		timer_init_object(
61 			&CPULOCAL_BY_INDEX(power_voting, cpu).retry_timer,
62 			TIMER_ACTION_POWER_CPU_ON_RETRY);
63 		CPULOCAL_BY_INDEX(power_voting, cpu).retry_count = 0U;
64 
65 		// Initialize the boot CPU's vote count to 1 while booting to
66 		// prevent the cpu going to suspend. This will be decremented
67 		// once the rootvm setup is completed and the rootvm VCPU has
68 		// voted to keep the boot core powered on.
69 		CPULOCAL_BY_INDEX(power_voting, cpu).vote_count =
70 			(cpu == boot_cpu) ? 1U : 0U;
71 
72 		CPULOCAL_BY_INDEX(power_state, cpu) =
73 			(cpu == boot_cpu) ? CPU_POWER_STATE_COLD_BOOT
74 					  : CPU_POWER_STATE_OFF;
75 
76 		spinlock_release_nopreempt(
77 			&CPULOCAL_BY_INDEX(power_voting, cpu).lock);
78 	}
79 
80 	spinlock_init(&power_system_lock);
81 
82 	// FIXME:
83 	spinlock_acquire_nopreempt(&power_system_lock);
84 	bitmap_set(power_system_online_cpus, (index_t)boot_cpu);
85 	spinlock_release_nopreempt(&power_system_lock);
86 }
87 
// Warm-boot (or first-boot) path for a CPU: move it to the ONLINE state and,
// if it is the first CPU to come up, fire the system-resume event.
void
power_handle_boot_cpu_warm_init(void)
{
	spinlock_acquire_nopreempt(&CPULOCAL(power_voting).lock);
	cpu_power_state_t state = CPULOCAL(power_state);

	// Only these states can legitimately reach warm init: initial cold
	// boot, a cpu_on started by power_try_cpu_on(), or wake from suspend.
	assert((state == CPU_POWER_STATE_COLD_BOOT) ||
	       (state == CPU_POWER_STATE_STARTED) ||
	       (state == CPU_POWER_STATE_SUSPEND));
	CPULOCAL(power_state) = CPU_POWER_STATE_ONLINE;

	if (state == CPU_POWER_STATE_STARTED) {
		// This CPU was powered on via a vote; notify listeners.
		trigger_power_cpu_online_event();

#if defined(DISABLE_PSCI_CPU_OFF) && DISABLE_PSCI_CPU_OFF
		// With CPU_OFF disabled, pin the core on with an extra vote.
		power_voting_t *voting = &CPULOCAL(power_voting);
		voting->vote_count++;
#endif
	}
	spinlock_release_nopreempt(&CPULOCAL(power_voting).lock);

	// FIXME:
	spinlock_acquire_nopreempt(&power_system_lock);
	if (bitmap_empty(power_system_online_cpus, PLATFORM_MAX_CORES)) {
		// STARTED could be seen due to a last-cpu-suspend/cpu_on race.
		assert((state == CPU_POWER_STATE_STARTED) ||
		       (state == CPU_POWER_STATE_SUSPEND));
		trigger_power_system_resume_event(power_system_suspend_state);
	}
	bitmap_set(power_system_online_cpus, (index_t)cpulocal_get_index());
	spinlock_release_nopreempt(&power_system_lock);
}
120 
121 error_t
power_handle_power_cpu_suspend(platform_power_state_t state)122 power_handle_power_cpu_suspend(platform_power_state_t state)
123 {
124 	error_t	    err	   = OK;
125 	cpu_index_t cpu_id = cpulocal_get_index();
126 
127 	// FIXME:
128 	spinlock_acquire_nopreempt(&power_system_lock);
129 	bitmap_clear(power_system_online_cpus, (index_t)cpu_id);
130 	if (bitmap_empty(power_system_online_cpus, PLATFORM_MAX_CORES)) {
131 		power_system_suspend_state = state;
132 		err = trigger_power_system_suspend_event(state);
133 		if (err != OK) {
134 			bitmap_set(power_system_online_cpus, (index_t)cpu_id);
135 		}
136 	}
137 	spinlock_release_nopreempt(&power_system_lock);
138 
139 	if (err == OK) {
140 		spinlock_acquire_nopreempt(&CPULOCAL(power_voting).lock);
141 		assert(CPULOCAL(power_state) == CPU_POWER_STATE_ONLINE);
142 		CPULOCAL(power_state) = CPU_POWER_STATE_SUSPEND;
143 		spinlock_release_nopreempt(&CPULOCAL(power_voting).lock);
144 	}
145 
146 	return err;
147 }
148 
149 void
power_handle_power_cpu_resume(bool was_poweroff)150 power_handle_power_cpu_resume(bool was_poweroff)
151 {
152 	// A cpu that was warm booted updates its state in the cpu warm-boot
153 	// event.
154 	if (!was_poweroff) {
155 		spinlock_acquire_nopreempt(&CPULOCAL(power_voting).lock);
156 		assert(CPULOCAL(power_state) == CPU_POWER_STATE_SUSPEND);
157 		CPULOCAL(power_state) = CPU_POWER_STATE_ONLINE;
158 		spinlock_release_nopreempt(&CPULOCAL(power_voting).lock);
159 
160 		// FIXME:
161 		spinlock_acquire_nopreempt(&power_system_lock);
162 		if (bitmap_empty(power_system_online_cpus,
163 				 PLATFORM_MAX_CORES)) {
164 			trigger_power_system_resume_event(
165 				power_system_suspend_state);
166 		}
167 		bitmap_set(power_system_online_cpus,
168 			   (index_t)cpulocal_get_index());
169 		spinlock_release_nopreempt(&power_system_lock);
170 	} else {
171 		spinlock_acquire_nopreempt(&power_system_lock);
172 		// power_system_online_cpus should be updated in the warm init
173 		// event.
174 		assert(!bitmap_empty(power_system_online_cpus,
175 				     PLATFORM_MAX_CORES));
176 		spinlock_release_nopreempt(&power_system_lock);
177 	}
178 }
179 
// Attempt to power on the given CPU, handling the race where it is still in
// the middle of powering itself off. Called with the CPU's voting lock held,
// from the first power-on vote or from the retry timer.
//
// Returns OK if the CPU is (or will shortly be) on, ERROR_ARGUMENT_INVALID
// for a CPU index that doesn't exist on this platform, ERROR_FAILURE when
// the retry budget is exhausted, or the platform_cpu_on() error otherwise.
static error_t
power_try_cpu_on(power_voting_t *voting, cpu_index_t cpu)
	REQUIRE_LOCK(voting->lock)
{
	error_t ret;

	if (!platform_cpu_exists(cpu)) {
		ret = ERROR_ARGUMENT_INVALID;
		goto out;
	}

	cpu_power_state_t *state = &CPULOCAL_BY_INDEX(power_state, cpu);
	if ((*state != CPU_POWER_STATE_OFF) &&
	    (*state != CPU_POWER_STATE_OFFLINE)) {
		// CPU has already been started, or didn't get to power off.
		ret = OK;
		goto out;
	}

	ret = platform_cpu_on(cpu);

	if (ret == OK) {
		// Mark the CPU as started so we don't call cpu_on twice.
		*state		    = CPU_POWER_STATE_STARTED;
		voting->retry_count = 0U;
		goto out;
	} else if ((ret == ERROR_RETRY) &&
		   (voting->retry_count < MAX_CPU_ON_RETRIES)) {
		// We are racing with a power-off, and it is too late to prevent
		// the power-off completing. We need to wait until power-off is
		// complete and then retry. Enqueue the retry timer, if it is
		// not already queued.
		if (!timer_is_queued(&voting->retry_timer)) {
			timer_enqueue(&voting->retry_timer,
				      timer_get_current_timer_ticks() +
					      power_cpu_on_retry_delay_ticks);
		}

		// If we're racing with power-off, that means the CPU is
		// functional and the power-on should not fail, so report
		// success to the caller. If the retry does fail, we panic.
		ret = OK;
	} else if (ret == ERROR_RETRY) {
		// We ran out of retry attempts.
		ret = ERROR_FAILURE;
	} else {
		// platform_cpu_on() failed and cannot be retried; just return
		// the error status.
	}

out:
	return ret;
}
233 
234 error_t
power_vote_cpu_on(cpu_index_t cpu)235 power_vote_cpu_on(cpu_index_t cpu)
236 {
237 	error_t ret;
238 
239 	assert(cpulocal_index_valid(cpu));
240 	power_voting_t *voting = &CPULOCAL_BY_INDEX(power_voting, cpu);
241 
242 	spinlock_acquire(&voting->lock);
243 	if (voting->vote_count == 0U) {
244 		ret = power_try_cpu_on(voting, cpu);
245 		if (ret != OK) {
246 			goto out;
247 		}
248 	}
249 
250 	voting->vote_count++;
251 	ret = OK;
252 
253 out:
254 	spinlock_release(&voting->lock);
255 	return ret;
256 }
257 
258 void
power_vote_cpu_off(cpu_index_t cpu)259 power_vote_cpu_off(cpu_index_t cpu)
260 {
261 	assert(cpulocal_index_valid(cpu));
262 	power_voting_t *voting = &CPULOCAL_BY_INDEX(power_voting, cpu);
263 
264 	spinlock_acquire(&voting->lock);
265 	assert(voting->vote_count > 0U);
266 	voting->vote_count--;
267 
268 	if (voting->vote_count == 0U) {
269 		// Any outstanding retries can be cancelled.
270 		voting->retry_count = 0U;
271 		timer_dequeue(&voting->retry_timer);
272 
273 		// Send an IPI to rerun the idle handlers in case the CPU
274 		// is already idle in WFI or suspend.
275 		ipi_one(IPI_REASON_IDLE, cpu);
276 	}
277 	spinlock_release(&voting->lock);
278 }
279 
// Idle handler: if no votes are keeping this CPU on, take it offline. If it
// is the last online CPU, this also attempts a whole-system suspend, which a
// handler may veto.
idle_state_t
power_handle_idle_yield(bool in_idle_thread)
{
	idle_state_t idle_state = IDLE_STATE_IDLE;

	// Only the idle thread may power the CPU off.
	if (!in_idle_thread) {
		goto out;
	}

	// Don't power off while RCU still has work queued for this CPU.
	if (rcu_has_pending_updates()) {
		goto out;
	}

	power_voting_t *voting = &CPULOCAL(power_voting);
	spinlock_acquire_nopreempt(&voting->lock);
	if (voting->vote_count == 0U) {
		error_t err = OK;

		// Note: power_system_lock is nested inside the voting lock.
		spinlock_acquire_nopreempt(&power_system_lock);
		cpu_index_t cpu_id = cpulocal_get_index();
		bitmap_clear(power_system_online_cpus, (index_t)cpu_id);
		if (bitmap_empty(power_system_online_cpus,
				 PLATFORM_MAX_CORES)) {
			// Last CPU going down: record a default (zeroed)
			// suspend state and run the system-suspend event.
			power_system_suspend_state =
				(platform_power_state_t){ 0 };
			err = trigger_power_system_suspend_event(
				power_system_suspend_state);
			if (err != OK) {
				// Vetoed; remain online.
				bitmap_set(power_system_online_cpus,
					   (index_t)cpu_id);
			}
		}
		spinlock_release_nopreempt(&power_system_lock);

		if (err == OK) {
			assert(CPULOCAL(power_state) == CPU_POWER_STATE_ONLINE);
			trigger_power_cpu_offline_event();
			CPULOCAL(power_state) = CPU_POWER_STATE_OFFLINE;
			spinlock_release_nopreempt(&voting->lock);

			platform_cpu_off();

			// platform_cpu_off() returned, so a wakeup occurred.
			idle_state = IDLE_STATE_WAKEUP;
		} else {
			spinlock_release_nopreempt(&voting->lock);
		}
	} else {
		spinlock_release_nopreempt(&voting->lock);
	}

out:
	return idle_state;
}
333 
334 bool
power_handle_timer_action(timer_t * timer)335 power_handle_timer_action(timer_t *timer)
336 {
337 	assert(timer != NULL);
338 
339 	power_voting_t *voting = power_voting_container_of_retry_timer(timer);
340 	cpu_index_t	cpu    = CPULOCAL_PTR_INDEX(power_voting, voting);
341 
342 	spinlock_acquire_nopreempt(&voting->lock);
343 	error_t ret = OK;
344 	if (voting->vote_count > 0U) {
345 		voting->retry_count++;
346 		ret = power_try_cpu_on(voting, cpu);
347 	}
348 	spinlock_release_nopreempt(&voting->lock);
349 
350 	if (ret != OK) {
351 		panic("Failed to power on a CPU that was previously on");
352 	}
353 
354 	return true;
355 }
356 
#if defined(MODULE_VM_ROOTVM)
// The Boot CPU power count is initialised to 1. Decrement the count after the
// root VM initialization.
void
power_handle_rootvm_started(void)
{
	cpu_index_t this_cpu = cpulocal_get_index();

	power_vote_cpu_off(this_cpu);
}
#endif
366 
367 void
power_handle_boot_hypervisor_handover(void)368 power_handle_boot_hypervisor_handover(void)
369 {
370 	// Ensure the running core is the only core online. There is no easy way
371 	// to do this race-free, but it doesn't really matter for our purpose.
372 	count_t on_count = 0;
373 	for (cpu_index_t cpu = 0U; cpu < PLATFORM_MAX_CORES; cpu++) {
374 		cpu_power_state_t state = CPULOCAL_BY_INDEX(power_state, cpu);
375 		if ((state != CPU_POWER_STATE_OFF) &&
376 		    (state != CPU_POWER_STATE_OFFLINE)) {
377 			on_count++;
378 		}
379 	}
380 
381 	if (on_count != 1U) {
382 		panic("Hypervisor hand-over requested with multiple CPUs on");
383 	}
384 }
385 
#if defined(POWER_START_ALL_CORES)
// At boot, cast a power-on vote for every secondary CPU so all cores come up.
void
power_handle_boot_hypervisor_start(void)
{
	cpu_index_t boot_cpu = cpulocal_get_index();

	for (cpu_index_t cpu = 0U; cpulocal_index_valid(cpu); cpu++) {
		if (cpu == boot_cpu) {
			// The boot CPU is already running; no vote needed.
			continue;
		}

		// Best-effort: power_try_cpu_on() rejects CPU indices that
		// don't exist on this platform (sparse numbering), so the
		// result is deliberately discarded rather than treated as
		// fatal.
		(void)power_vote_cpu_on(cpu);
	}
}
#endif
401