/*
 * Copyright (c) 2015-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <drivers/arm/css/css_scp.h>
#include <drivers/arm/dsu.h>
#include <lib/cassert.h>
#include <plat/arm/common/plat_arm.h>

#include <plat/common/platform.h>

#include <plat/arm/css/common/css_pm.h>

/* Allow CSS platforms to override `plat_arm_psci_pm_ops` */
#pragma weak plat_arm_psci_pm_ops

#if ARM_RECOM_STATE_ID_ENC
/*
 * The table storing the valid idle power states. Ensure that the
 * array entries are populated in ascending order of state-id to
 * enable us to use binary search during power state validation.
 * The table must be terminated by a NULL entry.
 */
const unsigned int arm_pm_idle_states[] = {
        /* State-id - 0x001 */
        arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
                ARM_LOCAL_STATE_RET, ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
        /* State-id - 0x002 */
        arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
                ARM_LOCAL_STATE_OFF, ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
        /* State-id - 0x022 */
        arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
                ARM_LOCAL_STATE_OFF, ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > ARM_PWR_LVL1
        /* State-id - 0x222 */
        arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
                ARM_LOCAL_STATE_OFF, ARM_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
        0,
};
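/*
 * Note: with the recommended state-id encoding, each power level's local state
 * occupies a 4-bit field (ARM_LOCAL_PSTATE_WIDTH) in the state-id, with power
 * level 0 in the least significant nibble; e.g. 0x022 above encodes core OFF,
 * cluster OFF, system RUN.
 */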
#endif /* ARM_RECOM_STATE_ID_ENC */

/*
 * All the power management helpers in this file assume at least cluster power
 * level is supported.
 */
CASSERT(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL1,
        assert_max_pwr_lvl_supported_mismatch);

/*
 * Ensure that the PLAT_MAX_PWR_LVL is not greater than CSS_SYSTEM_PWR_DMN_LVL
 * assumed by the CSS layer.
 */
CASSERT(PLAT_MAX_PWR_LVL <= CSS_SYSTEM_PWR_DMN_LVL,
        assert_max_pwr_lvl_higher_than_css_sys_lvl);

/*******************************************************************************
 * Handler called when a power domain is about to be turned on. The
 * level and mpidr determine the affinity instance.
 ******************************************************************************/
int css_pwr_domain_on(u_register_t mpidr)
{
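        /*
         * css_scp_on() asks the SCP to power on the core identified by
         * `mpidr` (on current CSS platforms this request is typically
         * conveyed over SCMI).
         */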
        css_scp_on(mpidr);

        return PSCI_E_SUCCESS;
}

static void css_pwr_domain_on_finisher_common(
                                const psci_power_state_t *target_state)
{
        assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);

        /*
         * Perform the common cluster-specific operations, i.e. enable
         * coherency if this cluster was off.
         */
        if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
#if PRESERVE_DSU_PMU_REGS
                cluster_on_dsu_pmu_context_restore();
#endif
#if !HW_ASSISTED_COHERENCY
                plat_arm_interconnect_enter_coherency();
#endif
        }
}

/*******************************************************************************
 * Handler called when a power level has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from. This handler is never invoked with the system
 * power domain uninitialized: either the primary core has taken care of it
 * during cold boot, or the first core awakened from system suspend has already
 * initialized it.
 ******************************************************************************/
void css_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
        /* Assert that the system power domain need not be initialized */
        assert(css_system_pwr_state(target_state) == ARM_LOCAL_STATE_RUN);

        css_pwr_domain_on_finisher_common(target_state);
}

/*******************************************************************************
 * Handler called when a power domain has just been powered on and the CPU
 * and its cluster are fully participating in coherent transactions on the
 * interconnect. The data cache must be enabled for the CPU at this point.
 ******************************************************************************/
void css_pwr_domain_on_finish_late(const psci_power_state_t *target_state)
{
        /* Setup the CPU power down request interrupt for secondary core(s) */
        css_setup_cpu_pwr_down_intr();
}

/*******************************************************************************
 * Common function called while turning a CPU off or suspending it. It is
 * called from css_pwr_domain_off() or css_pwr_domain_suspend() when these
 * functions are in turn called for the power domain at the highest power level
 * which will be powered down. It performs the actions common to the OFF and
 * SUSPEND calls.
 ******************************************************************************/
static void css_power_down_common(const psci_power_state_t *target_state)
{
        /* Cluster is to be turned off, so disable coherency */
        if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
#if PRESERVE_DSU_PMU_REGS
                cluster_off_dsu_pmu_context_save();
#endif
#if !HW_ASSISTED_COHERENCY
                plat_arm_interconnect_exit_coherency();
#endif
        }
}

/*******************************************************************************
 * Handler called when a power domain is about to be turned off. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_off(const psci_power_state_t *target_state)
{
        assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
        css_power_down_common(target_state);
        css_scp_off(target_state);
}

/*******************************************************************************
 * Handler called when a power domain is about to be suspended. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_suspend(const psci_power_state_t *target_state)
{
        /*
         * CSS currently supports retention only at cpu level. Just return
         * as nothing is to be done for retention.
         */
        if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
                return;

        assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
        css_power_down_common(target_state);

        /* Perform system domain state saving if issuing system suspend */
        if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) {
                arm_system_pwr_domain_save();

                /* Power off the Redistributor after having saved its context */
                gic_pcpu_off(plat_my_core_pos());
        }

        css_scp_suspend(target_state);
}

/*******************************************************************************
 * Handler called when a power domain has just been powered on after
 * having been suspended earlier. The target_state encodes the low power state
 * that each level has woken up from.
 * TODO: At the moment we reuse the on finisher and reinitialize the secure
 * context. Need to implement a separate suspend finisher.
 ******************************************************************************/
void css_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
        /* Return as nothing is to be done on waking up from retention. */
        if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
                return;

        /* Perform system domain restore if woken up from system suspend */
        if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF)
                /*
                 * At this point, the Distributor must be powered on to be ready
                 * to have its state restored. The Redistributor will be powered
                 * on as part of gicv3_rdistif_init_restore.
                 */
                arm_system_pwr_domain_resume();

        css_pwr_domain_on_finisher_common(target_state);
}

/*******************************************************************************
 * Handlers to shutdown/reboot the system
 ******************************************************************************/
void css_system_off(void)
{
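        /*
         * Request the SCP to shut the system down; this call is not expected
         * to return.
         */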
        css_scp_sys_shutdown();
}

void css_system_reset(void)
{
        css_scp_sys_reboot();
}

/*******************************************************************************
 * Handler called when the CPU power domain is about to enter standby.
 ******************************************************************************/
void css_cpu_standby(plat_local_state_t cpu_state)
{
        u_register_t scr;

        assert(cpu_state == ARM_LOCAL_STATE_RET);

        scr = read_scr_el3();
        /*
         * Enable the Non-secure interrupt to wake the CPU.
         * In GICv3 affinity routing mode, Non-secure Group 1 interrupts are
         * signalled as PhysicalFIQ at EL3, whereas GICv2 uses PhysicalIRQ.
         * Enabling both bits works for GICv2 mode as well as GICv3 affinity
         * routing mode.
         */
        write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
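        /*
         * isb() ensures the SCR_EL3 update takes effect and dsb() completes
         * outstanding memory accesses before the core enters standby in wfi().
         */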
        isb();
        dsb();
        wfi();

        /*
         * Restore SCR_EL3 to the original value. Synchronisation of scr_el3
         * is done by ERET while exiting EL3 (el3_exit) to save some execution
         * cycles.
         */
        write_scr_el3(scr);
}

/*******************************************************************************
 * Handler called to return the 'req_state' for system suspend.
 ******************************************************************************/
void css_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
        unsigned int i;

        /*
         * System Suspend is supported only if the system power domain node
         * is implemented.
         */
        assert(PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL);

        for (i = ARM_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++)
                req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF;
}

/*******************************************************************************
 * Handler to query CPU/cluster power states from SCP
 ******************************************************************************/
int css_node_hw_state(u_register_t mpidr, unsigned int power_level)
{
        return css_scp_get_power_state(mpidr, power_level);
}

/*
 * System power domain suspend is only supported via the PSCI SYSTEM_SUSPEND
 * API. A PSCI CPU_SUSPEND request targeting the system power domain will be
 * downgraded to the lower level.
 */
static int css_validate_power_state(unsigned int power_state,
                            psci_power_state_t *req_state)
{
        int rc;
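        /*
         * arm_validate_power_state() sanity-checks the requested power_state
         * (against arm_pm_idle_states[] when ARM_RECOM_STATE_ID_ENC is
         * enabled) and populates req_state with the per-level local states.
         */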
        rc = arm_validate_power_state(power_state, req_state);

        /*
         * Ensure that we don't overrun the pwr_domain_state array in the case
         * where the platform supported max power level is less than the system
         * power level
         */

#if (PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL)

        /*
         * Ensure that the system power domain level is never suspended
         * via PSCI CPU SUSPEND API. Currently system suspend is only
         * supported via PSCI SYSTEM SUSPEND API.
         */

        req_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] =
                                                        ARM_LOCAL_STATE_RUN;
#endif

        return rc;
}

/*
 * Custom `translate_power_state_by_mpidr` handler for CSS. Unlike
 * css_validate_power_state(), we do not downgrade the system power
 * domain level request in `power_state` as it will be used to query the
 * PSCI_STAT_COUNT/RESIDENCY at the system power domain level.
 */
static int css_translate_power_state_by_mpidr(u_register_t mpidr,
                unsigned int power_state,
                psci_power_state_t *output_state)
{
        return arm_validate_power_state(power_state, output_state);
}

/*
 * Setup the SGI interrupt that will be used to trigger the execution of the
 * power down sequence for all the secondary cores. This interrupt is setup
 * to be handled in EL3 context at a priority defined by the platform.
 */
void css_setup_cpu_pwr_down_intr(void)
{
#if CSS_SYSTEM_GRACEFUL_RESET
        plat_ic_set_interrupt_type(CSS_CPU_PWR_DOWN_REQ_INTR, INTR_TYPE_EL3);
        plat_ic_set_interrupt_priority(CSS_CPU_PWR_DOWN_REQ_INTR,
                        PLAT_REBOOT_PRI);
        plat_ic_enable_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR);
#endif
}

/*
 * For a graceful shutdown/reboot, each CPU in the system should run its power
 * down sequence. On a PSCI shutdown/reboot request, only one CPU gets the
 * opportunity to do so directly. To achieve a graceful reset of all online
 * cores, that CPU raises a warm reboot SGI to the rest of the online CPUs.
 * This handler services that SGI and executes the power down sequence on the
 * remaining CPUs.
 */
int css_reboot_interrupt_handler(uint32_t intr_raw, uint32_t flags,
                void *handle, void *cookie)
{
        unsigned int core_pos = plat_my_core_pos();

        assert(intr_raw == CSS_CPU_PWR_DOWN_REQ_INTR);

        /* Deactivate warm reboot SGI */
        plat_ic_end_of_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR);

        /*
         * Disable GIC CPU interface to prevent pending interrupt from waking
         * up the AP from WFI.
         */
        gic_cpuif_disable(core_pos);
        gic_pcpu_off(core_pos);

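        /*
         * psci_pwrdown_cpu_start() performs the power down preparation
         * (including the required cache maintenance) up to PLAT_MAX_PWR_LVL,
         * and psci_pwrdown_cpu_end_terminal() then parks the core; it is not
         * expected to return.
         */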
        psci_pwrdown_cpu_start(PLAT_MAX_PWR_LVL);

        psci_pwrdown_cpu_end_terminal();
        return 0;
}

/*******************************************************************************
 * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
 * platform will take care of registering the handlers with PSCI.
 ******************************************************************************/
plat_psci_ops_t plat_arm_psci_pm_ops = {
        .pwr_domain_on                  = css_pwr_domain_on,
        .pwr_domain_on_finish           = css_pwr_domain_on_finish,
        .pwr_domain_on_finish_late      = css_pwr_domain_on_finish_late,
        .pwr_domain_off                 = css_pwr_domain_off,
        .cpu_standby                    = css_cpu_standby,
        .pwr_domain_suspend             = css_pwr_domain_suspend,
        .pwr_domain_suspend_finish      = css_pwr_domain_suspend_finish,
        .system_off                     = css_system_off,
        .system_reset                   = css_system_reset,
        .validate_power_state           = css_validate_power_state,
        .validate_ns_entrypoint         = arm_validate_psci_entrypoint,
        .translate_power_state_by_mpidr = css_translate_power_state_by_mpidr,
        .get_node_hw_state              = css_node_hw_state,
        .get_sys_suspend_power_state    = css_get_sys_suspend_power_state,

#if defined(PLAT_ARM_MEM_PROT_ADDR)
        .mem_protect_chk                = arm_psci_mem_protect_chk,
        .read_mem_protect               = arm_psci_read_mem_protect,
        .write_mem_protect              = arm_nor_psci_write_mem_protect,
#endif
#if CSS_USE_SCMI_SDS_DRIVER
        .system_reset2                  = css_system_reset2,
#endif
};