1 /*
2 * Copyright (c) 2015-2026, Arm Limited and Contributors. All rights reserved.
3 *
4 * SPDX-License-Identifier: BSD-3-Clause
5 */
6
7 #include <assert.h>
8
9 #include <platform_def.h>
10
11 #include <arch_helpers.h>
12 #include <bl31/interrupt_mgmt.h>
13 #include <common/debug.h>
14 #include <drivers/arm/css/css_scp.h>
15 #include <drivers/arm/dsu.h>
16 #include <lib/cassert.h>
17 #include <plat/arm/common/plat_arm.h>
18
19 #include <plat/common/platform.h>
20
21 #include <plat/arm/css/common/css_pm.h>
22
23 /* Allow CSS platforms to override `plat_arm_psci_pm_ops` */
24 #pragma weak plat_arm_psci_pm_ops
25
#if ARM_RECOM_STATE_ID_ENC
/*
 * The table storing the valid idle power states. Ensure that the
 * array entries are populated in ascending order of state-id to
 * enable us to use binary search during power state validation.
 * The table must be terminated by a NULL entry.
 */
const unsigned int arm_pm_idle_states[] = {
	/* State-id - 0x001: core retention, cluster and system running */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
			ARM_LOCAL_STATE_RET, ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002: core power down, cluster and system running */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
			ARM_LOCAL_STATE_OFF, ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022: core and cluster power down, system running */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
			ARM_LOCAL_STATE_OFF, ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > ARM_PWR_LVL1
	/* State-id - 0x222: core, cluster and system power down */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
			ARM_LOCAL_STATE_OFF, ARM_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	/* NULL terminator required by the binary-search consumer */
	0,
};
#endif /* ARM_RECOM_STATE_ID_ENC */
51
/*
 * All the power management helpers in this file assume at least cluster power
 * level is supported.
 */
CASSERT(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL1,
		assert_max_pwr_lvl_supported_mismatch);

/*
 * Ensure that the PLAT_MAX_PWR_LVL is not greater than CSS_SYSTEM_PWR_DMN_LVL
 * assumed by the CSS layer.
 */
CASSERT(PLAT_MAX_PWR_LVL <= CSS_SYSTEM_PWR_DMN_LVL,
		assert_max_pwr_lvl_higher_than_css_sys_lvl);
65
/*******************************************************************************
 * Handler called when a power domain is about to be turned on. The
 * level and mpidr determine the affinity instance.
 *
 * Delegates the request to the SCP; the call is assumed to succeed, so
 * PSCI_E_SUCCESS is returned unconditionally.
 ******************************************************************************/
int css_pwr_domain_on(u_register_t mpidr)
{
	/* Request the SCP to power on the core identified by mpidr */
	css_scp_on(mpidr);

	return PSCI_E_SUCCESS;
}
76
css_pwr_domain_on_finisher_common(const psci_power_state_t * target_state)77 static void css_pwr_domain_on_finisher_common(
78 const psci_power_state_t *target_state)
79 {
80 assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
81
82 /*
83 * Perform the common cluster specific operations i.e enable coherency
84 * if this cluster was off.
85 */
86 if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
87 #if PRESERVE_DSU_PMU_REGS
88 cluster_on_dsu_pmu_context_restore();
89 #endif
90 #if !HW_ASSISTED_COHERENCY
91 plat_arm_interconnect_enter_coherency();
92 #endif
93 }
94 }
95
96 /*******************************************************************************
97 * Handler called when a power level has just been powered on after
98 * being turned off earlier. The target_state encodes the low power state that
99 * each level has woken up from. This handler would never be invoked with
100 * the system power domain uninitialized as either the primary would have taken
101 * care of it as part of cold boot or the first core awakened from system
102 * suspend would have already initialized it.
103 ******************************************************************************/
css_pwr_domain_on_finish(const psci_power_state_t * target_state)104 void css_pwr_domain_on_finish(const psci_power_state_t *target_state)
105 {
106 /* Assert that the system power domain need not be initialized */
107 assert(css_system_pwr_state(target_state) == ARM_LOCAL_STATE_RUN);
108
109 css_pwr_domain_on_finisher_common(target_state);
110 }
111
/*
 * Hook invoked before per-CPU GIC initialization. CSS platforms have no
 * work to perform here, so this is an empty stub (cpu_idx is unused).
 */
void plat_gic_pre_pcpu_init(unsigned int cpu_idx)
{
}
115
/*******************************************************************************
 * Handler called when a power domain has just been powered on and the cpu
 * and its cluster are fully participating in coherent transaction on the
 * interconnect. Data cache must be enabled for CPU at this point.
 *
 * target_state is unused here; only the power-down request interrupt needs
 * to be (re)configured for the newly powered-on core.
 ******************************************************************************/
void css_pwr_domain_on_finish_late(const psci_power_state_t *target_state)
{
	/* Setup the CPU power down request interrupt for secondary core(s) */
	css_setup_cpu_pwr_down_intr();
}
126
127 /*******************************************************************************
128 * Common function called while turning a cpu off or suspending it. It is called
129 * from css_off() or css_suspend() when these functions in turn are called for
130 * power domain at the highest power level which will be powered down. It
131 * performs the actions common to the OFF and SUSPEND calls.
132 ******************************************************************************/
css_power_down_common(const psci_power_state_t * target_state)133 static void css_power_down_common(const psci_power_state_t *target_state)
134 {
135 /* Cluster is to be turned off, so disable coherency */
136 if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
137 #if PRESERVE_DSU_PMU_REGS
138 cluster_off_dsu_pmu_context_save();
139 #endif
140 #if !HW_ASSISTED_COHERENCY
141 plat_arm_interconnect_exit_coherency();
142 #endif
143 }
144 }
145
/*******************************************************************************
 * Handler called when a power domain is about to be turned off. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_off(const psci_power_state_t *target_state)
{
	/* This handler is only invoked for a full core power down. */
	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
	/* Tear down cluster coherency/DSU context before notifying the SCP. */
	css_power_down_common(target_state);
	/* Hand the power-down request for this domain to the SCP. */
	css_scp_off(target_state);

	/*
	 * Force SME off to not get power down rejected on some cores. Getting
	 * here is terminal so we don't care if we lose context because of
	 * another wakeup.
	 */
#if CSS_ERRATA_SME_POWER_DOWN
	if (is_feat_sme_supported()) {
		write_svcr(0);
		isb();
	}
#endif
}
168
/*******************************************************************************
 * Handler called when a power domain is about to be suspended. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	/*
	 * CSS currently supports retention only at cpu level. Just return
	 * as nothing is to be done for retention.
	 */
	if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
		return;

	/* Anything other than retention must be a full core power down. */
	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
	css_power_down_common(target_state);

	/* Perform system domain state saving if issuing system suspend */
	if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) {
		arm_system_pwr_domain_save();

		/* Power off the Redistributor after having saved its context */
		gic_pcpu_off(plat_my_core_pos());
	}

	/* Finally, ask the SCP to place the domain(s) in the suspend state. */
	css_scp_suspend(target_state);
}
196
197 /*******************************************************************************
198 * Handler called when a power domain has just been powered on after
199 * having been suspended earlier. The target_state encodes the low power state
200 * that each level has woken up from.
201 * TODO: At the moment we reuse the on finisher and reinitialize the secure
202 * context. Need to implement a separate suspend finisher.
203 ******************************************************************************/
css_pwr_domain_suspend_finish(const psci_power_state_t * target_state)204 void css_pwr_domain_suspend_finish(
205 const psci_power_state_t *target_state)
206 {
207 /* Return as nothing is to be done on waking up from retention. */
208 if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
209 return;
210
211 /* Perform system domain restore if woken up from system suspend */
212 if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF)
213 /*
214 * At this point, the Distributor must be powered on to be ready
215 * to have its state restored. The Redistributor will be powered
216 * on as part of gicv3_rdistif_init_restore.
217 */
218 arm_system_pwr_domain_resume();
219
220 css_pwr_domain_on_finisher_common(target_state);
221 }
222
/*******************************************************************************
 * Handlers to shutdown/reboot the system
 ******************************************************************************/

/* Request a system shutdown from the SCP. */
void css_system_off(void)
{
	css_scp_sys_shutdown();
}
230
/* Request a system reboot from the SCP. */
void css_system_reset(void)
{
	css_scp_sys_reboot();
}
235
236 /*******************************************************************************
237 * Handler called when the CPU power domain is about to enter standby.
238 ******************************************************************************/
css_cpu_standby(plat_local_state_t cpu_state)239 void css_cpu_standby(plat_local_state_t cpu_state)
240 {
241 unsigned int scr;
242
243 assert(cpu_state == ARM_LOCAL_STATE_RET);
244
245 scr = read_scr_el3();
246 /*
247 * Enable the Non secure interrupt to wake the CPU.
248 * In GICv3 affinity routing mode, the non secure group1 interrupts use
249 * the PhysicalFIQ at EL3 whereas in GICv2, it uses the PhysicalIRQ.
250 * Enabling both the bits works for both GICv2 mode and GICv3 affinity
251 * routing mode.
252 */
253 write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
254 isb();
255 dsb();
256 wfi();
257
258 /*
259 * Restore SCR to the original value, synchronisation of scr_el3 is
260 * done by eret while el3_exit to save some execution cycles.
261 */
262 write_scr_el3(scr);
263 }
264
265 /*******************************************************************************
266 * Handler called to return the 'req_state' for system suspend.
267 ******************************************************************************/
css_get_sys_suspend_power_state(psci_power_state_t * req_state)268 void css_get_sys_suspend_power_state(psci_power_state_t *req_state)
269 {
270 unsigned int i;
271
272 /*
273 * System Suspend is supported only if the system power domain node
274 * is implemented.
275 */
276 assert(PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL);
277
278 for (i = ARM_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++)
279 req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF;
280 }
281
/*******************************************************************************
 * Handler to query CPU/cluster power states from SCP
 *
 * Thin wrapper that forwards the query to the SCP transport layer and returns
 * its result directly (a HW_ON/HW_OFF style state or a PSCI error code).
 ******************************************************************************/
int css_node_hw_state(u_register_t mpidr, unsigned int power_level)
{
	return css_scp_get_power_state(mpidr, power_level);
}
289
290 /*
291 * The system power domain suspend is only supported only via
292 * PSCI SYSTEM_SUSPEND API. PSCI CPU_SUSPEND request to system power domain
293 * will be downgraded to the lower level.
294 */
css_validate_power_state(unsigned int power_state,psci_power_state_t * req_state)295 static int css_validate_power_state(unsigned int power_state,
296 psci_power_state_t *req_state)
297 {
298 int rc;
299 rc = arm_validate_power_state(power_state, req_state);
300
301 /*
302 * Ensure that we don't overrun the pwr_domain_state array in the case
303 * where the platform supported max power level is less than the system
304 * power level
305 */
306
307 #if (PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL)
308
309 /*
310 * Ensure that the system power domain level is never suspended
311 * via PSCI CPU SUSPEND API. Currently system suspend is only
312 * supported via PSCI SYSTEM SUSPEND API.
313 */
314
315 req_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] =
316 ARM_LOCAL_STATE_RUN;
317 #endif
318
319 return rc;
320 }
321
/*
 * Custom `translate_power_state_by_mpidr` handler for CSS. Unlike in the
 * `css_validate_power_state`, we do not downgrade the system power
 * domain level request in `power_state` as it will be used to query the
 * PSCI_STAT_COUNT/RESIDENCY at the system power domain level.
 *
 * mpidr is unused: the translation is identical for every core.
 */
static int css_translate_power_state_by_mpidr(u_register_t mpidr,
		unsigned int power_state,
		psci_power_state_t *output_state)
{
	return arm_validate_power_state(power_state, output_state);
}
334
/*
 * Setup the SGI interrupt that will be used to trigger the execution of the
 * power down sequence for all the secondary cores. This interrupt is setup to
 * be handled in EL3 context at a priority defined by the platform.
 *
 * Compiles to a no-op unless CSS_SYSTEM_GRACEFUL_RESET is enabled.
 */
void css_setup_cpu_pwr_down_intr(void)
{
#if CSS_SYSTEM_GRACEFUL_RESET
	/* Route the SGI to EL3, set its priority and enable it. */
	plat_ic_set_interrupt_type(CSS_CPU_PWR_DOWN_REQ_INTR, INTR_TYPE_EL3);
	plat_ic_set_interrupt_priority(CSS_CPU_PWR_DOWN_REQ_INTR,
			PLAT_REBOOT_PRI);
	plat_ic_enable_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR);
#endif
}
349
/*
 * For a graceful shutdown/reboot, each CPU in the system should do its power
 * down sequence. On a PSCI shutdown/reboot request, only one CPU gets an
 * opportunity to do the powerdown sequence. To achieve a graceful reset of all
 * cores in the system, that CPU raises the warm reboot SGI to the rest of the
 * CPUs which are online. This handler services that reboot SGI: the remaining
 * CPUs execute their powerdown sequence here and never return.
 */
int css_reboot_interrupt_handler(uint32_t intr_raw, uint32_t flags,
		void *handle, void *cookie)
{
	unsigned int core_pos = plat_my_core_pos();

	/* Only the warm reboot SGI is expected here. */
	assert(intr_raw == CSS_CPU_PWR_DOWN_REQ_INTR);

	/* Deactivate warm reboot SGI */
	plat_ic_end_of_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR);

	/*
	 * Disable GIC CPU interface to prevent pending interrupt from waking
	 * up the AP from WFI.
	 */
	gic_cpuif_disable(core_pos);
	gic_pcpu_off(core_pos);

	/* Prepare this core for power down up to the highest power level. */
	psci_pwrdown_cpu_start(PLAT_MAX_PWR_LVL);

	/* Terminal: this call is not expected to return. */
	psci_pwrdown_cpu_end_terminal();
	return 0;
}
380
/*******************************************************************************
 * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
 * platform will take care of registering the handlers with PSCI.
 *
 * Declared weak (see #pragma at top of file) so CSS platforms may override
 * individual handlers or the whole structure.
 ******************************************************************************/
plat_psci_ops_t plat_arm_psci_pm_ops = {
	.pwr_domain_on		= css_pwr_domain_on,
	.pwr_domain_on_finish	= css_pwr_domain_on_finish,
	.pwr_domain_on_finish_late = css_pwr_domain_on_finish_late,
	.pwr_domain_off		= css_pwr_domain_off,
	.cpu_standby		= css_cpu_standby,
	.pwr_domain_suspend	= css_pwr_domain_suspend,
	.pwr_domain_suspend_finish	= css_pwr_domain_suspend_finish,
	.system_off		= css_system_off,
	.system_reset		= css_system_reset,
	.validate_power_state	= css_validate_power_state,
	.validate_ns_entrypoint = arm_validate_psci_entrypoint,
	.translate_power_state_by_mpidr = css_translate_power_state_by_mpidr,
	.get_node_hw_state	= css_node_hw_state,
	.get_sys_suspend_power_state = css_get_sys_suspend_power_state,

	/* Optional mem-protect hooks, only when the platform defines a
	 * protected memory region address. */
#if defined(PLAT_ARM_MEM_PROT_ADDR)
	.mem_protect_chk	= arm_psci_mem_protect_chk,
	.read_mem_protect	= arm_psci_read_mem_protect,
	.write_mem_protect	= arm_nor_psci_write_mem_protect,
#endif
	/* SYSTEM_RESET2 is only available over the SCMI/SDS transport. */
#if CSS_USE_SCMI_SDS_DRIVER
	.system_reset2		= css_system_reset2,
#endif
};
410