xref: /rk3399_ARM-atf/plat/arm/css/common/css_pm.c (revision 3e13402cbf95e142ed3a3bb1b4f38d1cae7dcc8f)
1 /*
2  * Copyright (c) 2015-2026, Arm Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 
9 #include <platform_def.h>
10 
11 #include <arch_helpers.h>
12 #include <bl31/interrupt_mgmt.h>
13 #include <common/debug.h>
14 #include <drivers/arm/css/css_scp.h>
15 #include <drivers/arm/dsu.h>
16 #include <lib/cassert.h>
17 #include <plat/arm/common/plat_arm.h>
18 
19 #include <plat/common/platform.h>
20 
21 #include <plat/arm/css/common/css_pm.h>
22 
23 /* Allow CSS platforms to override `plat_arm_psci_pm_ops` */
24 #pragma weak plat_arm_psci_pm_ops
25 
#if ARM_RECOM_STATE_ID_ENC
/*
 *  The table storing the valid idle power states. Ensure that the
 *  array entries are populated in ascending order of state-id to
 *  enable us to use binary search during power state validation.
 *  The table must be terminated by a NULL entry.
 */
const unsigned int arm_pm_idle_states[] = {
	/* State-id - 0x001: core retention, cluster and system running */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
		ARM_LOCAL_STATE_RET, ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002: core powerdown, cluster and system running */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022: core and cluster powerdown, system running */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > ARM_PWR_LVL1
	/* State-id - 0x222: core, cluster and system powerdown */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	/* NULL terminator required by consumers of this table */
	0,
};
#endif /* ARM_RECOM_STATE_ID_ENC */
51 
/*
 * All the power management helpers in this file assume at least cluster power
 * level is supported.
 */
CASSERT(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL1,
		assert_max_pwr_lvl_supported_mismatch);

/*
 * Ensure that the PLAT_MAX_PWR_LVL is not greater than CSS_SYSTEM_PWR_DMN_LVL
 * assumed by the CSS layer. A platform exceeding the CSS system power domain
 * level would index past the states this file manages.
 */
CASSERT(PLAT_MAX_PWR_LVL <= CSS_SYSTEM_PWR_DMN_LVL,
		assert_max_pwr_lvl_higher_than_css_sys_lvl);
65 
/*******************************************************************************
 * Handler called when a power domain is about to be turned on. The
 * level and mpidr determine the affinity instance. Always reports success:
 * the actual power-on is delegated to the SCP via css_scp_on().
 ******************************************************************************/
int css_pwr_domain_on(u_register_t mpidr)
{
	/* Ask the SCP to power on the core identified by its MPIDR */
	css_scp_on(mpidr);

	return PSCI_E_SUCCESS;
}
76 
77 static void css_pwr_domain_on_finisher_common(
78 		const psci_power_state_t *target_state)
79 {
80 	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
81 
82 	/*
83 	 * Perform the common cluster specific operations i.e enable coherency
84 	 * if this cluster was off.
85 	 */
86 	if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
87 #if PRESERVE_DSU_PMU_REGS
88 		cluster_on_dsu_pmu_context_restore();
89 #endif
90 #if !HW_ASSISTED_COHERENCY
91 		plat_arm_interconnect_enter_coherency();
92 #endif
93 	}
94 }
95 
/*******************************************************************************
 * Handler called when a power level has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from. This handler would never be invoked with
 * the system power domain uninitialized as either the primary would have taken
 * care of it as part of cold boot or the first core awakened from system
 * suspend would have already initialized it.
 ******************************************************************************/
void css_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* Assert that the system power domain need not be initialized */
	assert(css_system_pwr_state(target_state) == ARM_LOCAL_STATE_RUN);

	/* Re-enable coherency etc. if the cluster was off */
	css_pwr_domain_on_finisher_common(target_state);
}
111 
/*
 * Empty stub: CSS platforms have no work to do before per-CPU GIC
 * initialization. NOTE(review): presumably this satisfies a hook required by
 * the GIC driver layer — confirm against the GIC driver headers.
 */
void plat_gic_pre_pcpu_init(unsigned int cpu_idx)
{
}
115 
/*******************************************************************************
 * Handler called when a power domain has just been powered on and the cpu
 * and its cluster are fully participating in coherent transaction on the
 * interconnect. Data cache must be enabled for CPU at this point.
 ******************************************************************************/
void css_pwr_domain_on_finish_late(const psci_power_state_t *target_state)
{
	/* Setup the CPU power down request interrupt for secondary core(s) */
	css_setup_cpu_pwr_down_intr();
}
126 
127 /*******************************************************************************
128  * Common function called while turning a cpu off or suspending it. It is called
129  * from css_off() or css_suspend() when these functions in turn are called for
130  * power domain at the highest power level which will be powered down. It
131  * performs the actions common to the OFF and SUSPEND calls.
132  ******************************************************************************/
133 static void css_power_down_common(const psci_power_state_t *target_state)
134 {
135 	/* Cluster is to be turned off, so disable coherency */
136 	if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
137 #if PRESERVE_DSU_PMU_REGS
138 		cluster_off_dsu_pmu_context_save();
139 #endif
140 #if !HW_ASSISTED_COHERENCY
141 		plat_arm_interconnect_exit_coherency();
142 #endif
143 	}
144 }
145 
/*******************************************************************************
 * Handler called when a power domain is about to be turned off. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_off(const psci_power_state_t *target_state)
{
	/* OFF always implies a core powerdown request */
	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
	/* Disable coherency (and save DSU PMU context) if cluster goes down */
	css_power_down_common(target_state);
	/* Delegate the actual power-off to the SCP */
	css_scp_off(target_state);
}
156 
/*******************************************************************************
 * Handler called when a power domain is about to be suspended. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	/*
	 * CSS currently supports retention only at cpu level. Just return
	 * as nothing is to be done for retention.
	 */
	if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
		return;


	/* Past this point the request must be a core powerdown */
	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
	/* Disable coherency (and save DSU PMU context) if cluster goes down */
	css_power_down_common(target_state);

	/* Perform system domain state saving if issuing system suspend */
	if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) {
		arm_system_pwr_domain_save();

		/* Power off the Redistributor after having saved its context */
		gic_pcpu_off(plat_my_core_pos());
	}

	/* Hand the suspend request to the SCP last, after all state is saved */
	css_scp_suspend(target_state);
}
184 
185 /*******************************************************************************
186  * Handler called when a power domain has just been powered on after
187  * having been suspended earlier. The target_state encodes the low power state
188  * that each level has woken up from.
189  * TODO: At the moment we reuse the on finisher and reinitialize the secure
190  * context. Need to implement a separate suspend finisher.
191  ******************************************************************************/
192 void css_pwr_domain_suspend_finish(
193 				const psci_power_state_t *target_state)
194 {
195 	/* Return as nothing is to be done on waking up from retention. */
196 	if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
197 		return;
198 
199 	/* Perform system domain restore if woken up from system suspend */
200 	if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF)
201 		/*
202 		 * At this point, the Distributor must be powered on to be ready
203 		 * to have its state restored. The Redistributor will be powered
204 		 * on as part of gicv3_rdistif_init_restore.
205 		 */
206 		arm_system_pwr_domain_resume();
207 
208 	css_pwr_domain_on_finisher_common(target_state);
209 }
210 
/*******************************************************************************
 * Handlers to shutdown/reboot the system
 ******************************************************************************/
void css_system_off(void)
{
	/* Request a full system shutdown from the SCP */
	css_scp_sys_shutdown();
}
218 
/* Request a cold system reboot from the SCP */
void css_system_reset(void)
{
	css_scp_sys_reboot();
}
223 
224 /*******************************************************************************
225  * Handler called when the CPU power domain is about to enter standby.
226  ******************************************************************************/
227 void css_cpu_standby(plat_local_state_t cpu_state)
228 {
229 	unsigned int scr;
230 
231 	assert(cpu_state == ARM_LOCAL_STATE_RET);
232 
233 	scr = read_scr_el3();
234 	/*
235 	 * Enable the Non secure interrupt to wake the CPU.
236 	 * In GICv3 affinity routing mode, the non secure group1 interrupts use
237 	 * the PhysicalFIQ at EL3 whereas in GICv2, it uses the PhysicalIRQ.
238 	 * Enabling both the bits works for both GICv2 mode and GICv3 affinity
239 	 * routing mode.
240 	 */
241 	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
242 	isb();
243 	dsb();
244 	wfi();
245 
246 	/*
247 	 * Restore SCR to the original value, synchronisation of scr_el3 is
248 	 * done by eret while el3_exit to save some execution cycles.
249 	 */
250 	write_scr_el3(scr);
251 }
252 
253 /*******************************************************************************
254  * Handler called to return the 'req_state' for system suspend.
255  ******************************************************************************/
256 void css_get_sys_suspend_power_state(psci_power_state_t *req_state)
257 {
258 	unsigned int i;
259 
260 	/*
261 	 * System Suspend is supported only if the system power domain node
262 	 * is implemented.
263 	 */
264 	assert(PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL);
265 
266 	for (i = ARM_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++)
267 		req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF;
268 }
269 
/*******************************************************************************
 * Handler to query CPU/cluster power states from SCP. Simply forwards the
 * query; the return value is whatever css_scp_get_power_state() reports.
 ******************************************************************************/
int css_node_hw_state(u_register_t mpidr, unsigned int power_level)
{
	return css_scp_get_power_state(mpidr, power_level);
}
277 
/*
 * The system power domain suspend is supported only via the
 * PSCI SYSTEM_SUSPEND API. A PSCI CPU_SUSPEND request targeting the system
 * power domain will be downgraded to the next lower level.
 */
static int css_validate_power_state(unsigned int power_state,
			    psci_power_state_t *req_state)
{
	int rc;
	/* Common ARM validation populates req_state from power_state */
	rc = arm_validate_power_state(power_state, req_state);

	/*
	 * Ensure that we don't overrun the pwr_domain_state array in the case
	 * where the platform supported max power level is less than the system
	 * power level
	 */

#if (PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL)

	/*
	 * Ensure that the system power domain level is never suspended
	 * via PSCI CPU SUSPEND API. Currently system suspend is only
	 * supported via PSCI SYSTEM SUSPEND API.
	 */

	req_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] =
							ARM_LOCAL_STATE_RUN;
#endif

	/* Return the common validation result unchanged */
	return rc;
}
309 
/*
 * Custom `translate_power_state_by_mpidr` handler for CSS. Unlike in the
 * `css_validate_power_state`, we do not downgrade the system power
 * domain level request in `power_state` as it will be used to query the
 * PSCI_STAT_COUNT/RESIDENCY at the system power domain level.
 * The mpidr argument is unused: translation is identical for all cores.
 */
static int css_translate_power_state_by_mpidr(u_register_t mpidr,
		unsigned int power_state,
		psci_power_state_t *output_state)
{
	return arm_validate_power_state(power_state, output_state);
}
322 
/*
 * Setup the SGI interrupt that will be used to trigger the execution of the
 * power down sequence for all the secondary cores. This interrupt is setup
 * to be handled in EL3 context at a priority defined by the platform.
 */
/*
 * Configure CSS_CPU_PWR_DOWN_REQ_INTR as an EL3 interrupt at the platform
 * reboot priority and enable it. Compiled out (no-op) unless
 * CSS_SYSTEM_GRACEFUL_RESET is set.
 */
void css_setup_cpu_pwr_down_intr(void)
{
#if CSS_SYSTEM_GRACEFUL_RESET
	plat_ic_set_interrupt_type(CSS_CPU_PWR_DOWN_REQ_INTR, INTR_TYPE_EL3);
	plat_ic_set_interrupt_priority(CSS_CPU_PWR_DOWN_REQ_INTR,
			PLAT_REBOOT_PRI);
	plat_ic_enable_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR);
#endif
}
337 
/*
 * For a graceful shutdown/reboot, each CPU in the system should run its power
 * down sequence. On a PSCI shutdown/reboot request, only one CPU gets the
 * opportunity to execute the powerdown sequence directly. To achieve a
 * graceful reset of all cores in the system, that CPU raises the warm reboot
 * SGI to the rest of the online CPUs. The handler below services that reboot
 * SGI, making the remaining CPUs execute the powerdown sequence.
 */
/*
 * Handler for the warm reboot SGI: runs the full powerdown sequence on the
 * receiving CPU. The flags/handle/cookie parameters come from the interrupt
 * management framework signature and are unused here. Returns 0, though
 * psci_pwrdown_cpu_end_terminal() is not expected to return.
 */
int css_reboot_interrupt_handler(uint32_t intr_raw, uint32_t flags,
		void *handle, void *cookie)
{
	unsigned int core_pos = plat_my_core_pos();

	assert(intr_raw == CSS_CPU_PWR_DOWN_REQ_INTR);

	/* Deactivate warm reboot SGI */
	plat_ic_end_of_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR);

	/*
	 * Disable GIC CPU interface to prevent pending interrupt from waking
	 * up the AP from WFI.
	 */
	gic_cpuif_disable(core_pos);
	gic_pcpu_off(core_pos);

	/* Run the powerdown sequence up to the platform's highest level */
	psci_pwrdown_cpu_start(PLAT_MAX_PWR_LVL);

	/* Terminal: parks this CPU; execution does not continue past here */
	psci_pwrdown_cpu_end_terminal();
	return 0;
}
368 
/*******************************************************************************
 * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
 * platform will take care of registering the handlers with PSCI.
 * Declared weak above so individual CSS platforms may override the whole
 * structure.
 ******************************************************************************/
plat_psci_ops_t plat_arm_psci_pm_ops = {
	.pwr_domain_on		= css_pwr_domain_on,
	.pwr_domain_on_finish	= css_pwr_domain_on_finish,
	.pwr_domain_on_finish_late = css_pwr_domain_on_finish_late,
	.pwr_domain_off		= css_pwr_domain_off,
	.cpu_standby		= css_cpu_standby,
	.pwr_domain_suspend	= css_pwr_domain_suspend,
	.pwr_domain_suspend_finish	= css_pwr_domain_suspend_finish,
	.system_off		= css_system_off,
	.system_reset		= css_system_reset,
	.validate_power_state	= css_validate_power_state,
	.validate_ns_entrypoint = arm_validate_psci_entrypoint,
	.translate_power_state_by_mpidr = css_translate_power_state_by_mpidr,
	.get_node_hw_state	= css_node_hw_state,
	.get_sys_suspend_power_state = css_get_sys_suspend_power_state,

/* MEM_PROTECT hooks only when the platform defines a protect region */
#if defined(PLAT_ARM_MEM_PROT_ADDR)
	.mem_protect_chk	= arm_psci_mem_protect_chk,
	.read_mem_protect	= arm_psci_read_mem_protect,
	.write_mem_protect	= arm_nor_psci_write_mem_protect,
#endif
/* SYSTEM_RESET2 is only available with the SCMI/SDS SCP driver */
#if CSS_USE_SCMI_SDS_DRIVER
	.system_reset2		= css_system_reset2,
#endif
};
398