/*
 * Copyright (c) 2015-2022, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <drivers/arm/css/css_scp.h>
#include <lib/cassert.h>
#include <plat/arm/common/plat_arm.h>

#include <plat/common/platform.h>

#include <plat/arm/css/common/css_pm.h>

/* Allow CSS platforms to override `plat_arm_psci_pm_ops` */
#pragma weak plat_arm_psci_pm_ops

#if ARM_RECOM_STATE_ID_ENC
/*
 * The table storing the valid idle power states. Ensure that the
 * array entries are populated in ascending order of state-id to
 * enable us to use binary search during power state validation.
 * The table must be terminated by a NULL entry.
 */
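/*
 * With this encoding each state-id packs the local power state of every
 * level into 4-bit fields (power level 0 in bits[3:0], level 1 in
 * bits[7:4], level 2 in bits[11:8]); e.g. 0x022 encodes levels 0 and 1
 * OFF with level 2 still in RUN.
 */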
const unsigned int arm_pm_idle_states[] = {
	/* State-id - 0x001 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
		ARM_LOCAL_STATE_RET, ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > ARM_PWR_LVL1
	/* State-id - 0x222 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	0,
};
#endif /* ARM_RECOM_STATE_ID_ENC */

/*
 * All the power management helpers in this file assume at least cluster power
 * level is supported.
 */
CASSERT(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL1,
		assert_max_pwr_lvl_supported_mismatch);

/*
 * Ensure that the PLAT_MAX_PWR_LVL is not greater than CSS_SYSTEM_PWR_DMN_LVL
 * assumed by the CSS layer.
 */
CASSERT(PLAT_MAX_PWR_LVL <= CSS_SYSTEM_PWR_DMN_LVL,
		assert_max_pwr_lvl_higher_than_css_sys_lvl);

/*******************************************************************************
 * Handler called when a power domain is about to be turned on. The
 * level and mpidr determine the affinity instance.
 ******************************************************************************/
int css_pwr_domain_on(u_register_t mpidr)
{
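	/* Request the SCP to power on the core identified by mpidr */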
	css_scp_on(mpidr);

	return PSCI_E_SUCCESS;
}

static void css_pwr_domain_on_finisher_common(
				const psci_power_state_t *target_state)
{
	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);

	/*
	 * Perform the common cluster specific operations i.e. enable coherency
	 * if this cluster was off.
	 */
	if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF)
		plat_arm_interconnect_enter_coherency();
}

/*******************************************************************************
 * Handler called when a power level has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from. This handler would never be invoked with
 * the system power domain uninitialized as either the primary core would have
 * taken care of it as part of cold boot or the first core awakened from system
 * suspend would have already initialized it.
 ******************************************************************************/
void css_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* Assert that the system power domain need not be initialized */
	assert(css_system_pwr_state(target_state) == ARM_LOCAL_STATE_RUN);

	css_pwr_domain_on_finisher_common(target_state);
}

/*******************************************************************************
 * Handler called when a power domain has just been powered on and the cpu
 * and its cluster are fully participating in coherent transactions on the
 * interconnect. The data cache must be enabled for the CPU at this point.
 ******************************************************************************/
void css_pwr_domain_on_finish_late(const psci_power_state_t *target_state)
{
	/* Program the gic per-cpu distributor or re-distributor interface */
	plat_arm_gic_pcpu_init();

	/* Enable the gic cpu interface */
	plat_arm_gic_cpuif_enable();

	/* Setup the CPU power down request interrupt for secondary core(s) */
	css_setup_cpu_pwr_down_intr();
}

/*******************************************************************************
 * Common function called while turning a cpu off or suspending it. It is
 * called from css_off() or css_suspend() when these functions in turn are
 * called for the power domain at the highest power level which will be
 * powered down. It performs the actions common to the OFF and SUSPEND calls.
 ******************************************************************************/
static void css_power_down_common(const psci_power_state_t *target_state)
{
	/* Prevent interrupts from spuriously waking up this cpu */
	plat_arm_gic_cpuif_disable();

	/* Turn redistributor off */
	plat_arm_gic_redistif_off();

	/* Cluster is to be turned off, so disable coherency */
	if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
		plat_arm_interconnect_exit_coherency();

#if HW_ASSISTED_COHERENCY
		uint32_t reg;

		/*
		 * If we have determined this core to be the last one standing
		 * and we intend to power down the cluster proactively, we
		 * provide a hint to the power controller that cluster power is
		 * not required when all cores are powered down.
		 * Note that this is only advisory to the power controller and
		 * is supported only by SoCs with DynamIQ Shared Units (DSU).
		 */
		reg = read_clusterpwrdn();

		/* Clear and set bit 0 : Cluster power not required */
		reg &= ~DSU_CLUSTER_PWR_MASK;
		reg |= DSU_CLUSTER_PWR_OFF;
		write_clusterpwrdn(reg);
#endif
	}
}

/*******************************************************************************
 * Handler called when a power domain is about to be turned off. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_off(const psci_power_state_t *target_state)
{
	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
	css_power_down_common(target_state);
	css_scp_off(target_state);
}

/*******************************************************************************
 * Handler called when a power domain is about to be suspended. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	/*
	 * CSS currently supports retention only at cpu level. Just return
	 * as nothing is to be done for retention.
	 */
	if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
		return;

	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
	css_power_down_common(target_state);

	/* Perform system domain state saving if issuing system suspend */
	if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) {
		arm_system_pwr_domain_save();

		/* Power off the Redistributor after having saved its context */
		plat_arm_gic_redistif_off();
	}

	css_scp_suspend(target_state);
}

/*******************************************************************************
 * Handler called when a power domain has just been powered on after
 * having been suspended earlier. The target_state encodes the low power state
 * that each level has woken up from.
 * TODO: At the moment we reuse the on finisher and reinitialize the secure
 * context. Need to implement a separate suspend finisher.
 ******************************************************************************/
void css_pwr_domain_suspend_finish(
				const psci_power_state_t *target_state)
{
	/* Return as nothing is to be done on waking up from retention. */
	if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
		return;

	/* Perform system domain restore if woken up from system suspend */
	if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF)
		/*
		 * At this point, the Distributor must be powered on to be ready
		 * to have its state restored. The Redistributor will be powered
		 * on as part of gicv3_rdistif_init_restore.
		 */
		arm_system_pwr_domain_resume();

	css_pwr_domain_on_finisher_common(target_state);

	/* Enable the gic cpu interface */
	plat_arm_gic_cpuif_enable();
}

/*******************************************************************************
 * Handlers to shutdown/reboot the system
 ******************************************************************************/
void __dead2 css_system_off(void)
{
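	/* Request the SCP to power the system down; this call does not return */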
	css_scp_sys_shutdown();
}

void __dead2 css_system_reset(void)
{
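	/* Request the SCP to reboot the system; this call does not return */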
	css_scp_sys_reboot();
}

/*******************************************************************************
 * Handler called when the CPU power domain is about to enter standby.
 ******************************************************************************/
void css_cpu_standby(plat_local_state_t cpu_state)
{
	unsigned int scr;

	assert(cpu_state == ARM_LOCAL_STATE_RET);

	scr = read_scr_el3();
	/*
	 * Enable the Non-secure interrupt to wake the CPU.
	 * In GICv3 affinity routing mode, Non-secure Group 1 interrupts use
	 * Physical FIQ at EL3, whereas GICv2 uses Physical IRQ.
	 * Enabling both bits works for both GICv2 mode and GICv3 affinity
	 * routing mode.
	 */
	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
	isb();
	dsb();
	wfi();

	/*
	 * Restore SCR_EL3 to its original value; synchronisation of scr_el3
	 * is done by the eret in el3_exit to save some execution cycles.
	 */
	write_scr_el3(scr);
}

/*******************************************************************************
 * Handler called to return the 'req_state' for system suspend.
 ******************************************************************************/
void css_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	unsigned int i;

	/*
	 * System Suspend is supported only if the system power domain node
	 * is implemented.
	 */
	assert(PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL);

	for (i = ARM_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF;
}

/*******************************************************************************
 * Handler to query CPU/cluster power states from SCP
 ******************************************************************************/
int css_node_hw_state(u_register_t mpidr, unsigned int power_level)
{
	return css_scp_get_power_state(mpidr, power_level);
}

/*
 * System power domain suspend is only supported via the PSCI SYSTEM_SUSPEND
 * API. A PSCI CPU_SUSPEND request targeting the system power domain will be
 * downgraded to the lower level.
 */
static int css_validate_power_state(unsigned int power_state,
				psci_power_state_t *req_state)
{
	int rc;
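
	/* Perform the generic Arm power state validation first */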
	rc = arm_validate_power_state(power_state, req_state);

	/*
	 * Ensure that we don't overrun the pwr_domain_state array in the case
	 * where the platform-supported max power level is less than the system
	 * power level.
	 */

#if (PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL)

	/*
	 * Ensure that the system power domain level is never suspended
	 * via PSCI CPU SUSPEND API. Currently system suspend is only
	 * supported via PSCI SYSTEM SUSPEND API.
	 */

	req_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] =
						ARM_LOCAL_STATE_RUN;
#endif

	return rc;
}

/*
 * Custom `translate_power_state_by_mpidr` handler for CSS. Unlike
 * `css_validate_power_state`, we do not downgrade the system power
 * domain level request in `power_state` as it will be used to query the
 * PSCI_STAT_COUNT/RESIDENCY at the system power domain level.
 */
static int css_translate_power_state_by_mpidr(u_register_t mpidr,
				unsigned int power_state,
				psci_power_state_t *output_state)
{
	return arm_validate_power_state(power_state, output_state);
}

/*
 * Setup the SGI interrupt that will be used to trigger the execution of the
 * power down sequence for all the secondary cores. This interrupt is setup to
 * be handled in EL3 context at a priority defined by the platform.
 */
void css_setup_cpu_pwr_down_intr(void)
{
#if CSS_SYSTEM_GRACEFUL_RESET
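	/*
	 * Route the power down request SGI to EL3, assign it the platform
	 * reboot priority and then enable it.
	 */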
	plat_ic_set_interrupt_type(CSS_CPU_PWR_DOWN_REQ_INTR, INTR_TYPE_EL3);
	plat_ic_set_interrupt_priority(CSS_CPU_PWR_DOWN_REQ_INTR,
			PLAT_REBOOT_PRI);
	plat_ic_enable_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR);
#endif
}

/*
 * For a graceful shutdown/reboot, each CPU in the system should run its power
 * down sequence. On a PSCI shutdown/reboot request, only one CPU gets the
 * opportunity to do the powerdown sequence. To achieve a graceful reset of all
 * cores in the system, that CPU raises the warm reboot SGI to the rest of the
 * online CPUs. This handler for the reboot SGI makes the remaining CPUs
 * execute the powerdown sequence.
 */
int css_reboot_interrupt_handler(uint32_t intr_raw, uint32_t flags,
				void *handle, void *cookie)
{
	assert(intr_raw == CSS_CPU_PWR_DOWN_REQ_INTR);

	/* Deactivate warm reboot SGI */
	plat_ic_end_of_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR);

	/*
	 * Disable GIC CPU interface to prevent pending interrupt from waking
	 * up the AP from WFI.
	 */
	plat_arm_gic_cpuif_disable();
	plat_arm_gic_redistif_off();

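	/*
	 * Execute the CPU power down sequence for the highest power level,
	 * including any cache maintenance it requires.
	 */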
	psci_pwrdown_cpu(PLAT_MAX_PWR_LVL);

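	/* Ensure all outstanding memory accesses complete before entering WFI */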
	dmbsy();

	wfi();
	return 0;
}

/*******************************************************************************
 * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
 * platform will take care of registering the handlers with PSCI.
 ******************************************************************************/
plat_psci_ops_t plat_arm_psci_pm_ops = {
	.pwr_domain_on = css_pwr_domain_on,
	.pwr_domain_on_finish = css_pwr_domain_on_finish,
	.pwr_domain_on_finish_late = css_pwr_domain_on_finish_late,
	.pwr_domain_off = css_pwr_domain_off,
	.cpu_standby = css_cpu_standby,
	.pwr_domain_suspend = css_pwr_domain_suspend,
	.pwr_domain_suspend_finish = css_pwr_domain_suspend_finish,
	.system_off = css_system_off,
	.system_reset = css_system_reset,
	.validate_power_state = css_validate_power_state,
	.validate_ns_entrypoint = arm_validate_psci_entrypoint,
	.translate_power_state_by_mpidr = css_translate_power_state_by_mpidr,
	.get_node_hw_state = css_node_hw_state,
	.get_sys_suspend_power_state = css_get_sys_suspend_power_state,

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	.mem_protect_chk = arm_psci_mem_protect_chk,
	.read_mem_protect = arm_psci_read_mem_protect,
	.write_mem_protect = arm_nor_psci_write_mem_protect,
#endif
#if CSS_USE_SCMI_SDS_DRIVER
	.system_reset2 = css_system_reset2,
#endif
};