/*
 * Copyright (c) 2015-2022, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>

#include <platform_def.h>

#include <arch_helpers.h>
#include <bl31/interrupt_mgmt.h>
#include <common/debug.h>
#include <drivers/arm/css/css_scp.h>
#include <lib/cassert.h>
#include <plat/arm/common/plat_arm.h>

#include <plat/common/platform.h>

#include <plat/arm/css/common/css_pm.h>

/* Allow CSS platforms to override `plat_arm_psci_pm_ops` */
#pragma weak plat_arm_psci_pm_ops
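
/*
 * Illustrative sketch only: because the symbol above is weak, a CSS platform
 * that needs to customise a handler can provide its own strong definition of
 * `plat_arm_psci_pm_ops` in its platform port, reusing the common CSS handlers
 * for everything else. The `my_plat_pwr_domain_off` name below is hypothetical.
 *
 *	plat_psci_ops_t plat_arm_psci_pm_ops = {
 *		.pwr_domain_on	= css_pwr_domain_on,
 *		.pwr_domain_off	= my_plat_pwr_domain_off,
 *		...
 *	};
 */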

#if ARM_RECOM_STATE_ID_ENC
/*
 *  The table storing the valid idle power states. Ensure that the
 *  array entries are populated in ascending order of state-id to
 *  enable us to use binary search during power state validation.
 *  The table must be terminated by a NULL entry.
 */
const unsigned int arm_pm_idle_states[] = {
	/* State-id - 0x001 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
		ARM_LOCAL_STATE_RET, ARM_PWR_LVL0, PSTATE_TYPE_STANDBY),
	/* State-id - 0x002 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_RUN,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL0, PSTATE_TYPE_POWERDOWN),
	/* State-id - 0x022 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_RUN, ARM_LOCAL_STATE_OFF,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL1, PSTATE_TYPE_POWERDOWN),
#if PLAT_MAX_PWR_LVL > ARM_PWR_LVL1
	/* State-id - 0x222 */
	arm_make_pwrstate_lvl2(ARM_LOCAL_STATE_OFF, ARM_LOCAL_STATE_OFF,
		ARM_LOCAL_STATE_OFF, ARM_PWR_LVL2, PSTATE_TYPE_POWERDOWN),
#endif
	0,
};
#endif /* ARM_RECOM_STATE_ID_ENC */

/*
 * All the power management helpers in this file assume that at least the
 * cluster power level is supported.
 */
CASSERT(PLAT_MAX_PWR_LVL >= ARM_PWR_LVL1,
		assert_max_pwr_lvl_supported_mismatch);

/*
 * Ensure that PLAT_MAX_PWR_LVL is not greater than the CSS_SYSTEM_PWR_DMN_LVL
 * assumed by the CSS layer.
 */
CASSERT(PLAT_MAX_PWR_LVL <= CSS_SYSTEM_PWR_DMN_LVL,
		assert_max_pwr_lvl_higher_than_css_sys_lvl);

/*******************************************************************************
 * Handler called when a power domain is about to be turned on. The
 * level and mpidr determine the affinity instance.
 ******************************************************************************/
int css_pwr_domain_on(u_register_t mpidr)
{
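	/* Request the SCP to power on the core identified by mpidr */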
	css_scp_on(mpidr);

	return PSCI_E_SUCCESS;
}

static void css_pwr_domain_on_finisher_common(
		const psci_power_state_t *target_state)
{
	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);

	/*
	 * Perform the common cluster-specific operations, i.e. enable
	 * coherency if this cluster was off.
	 */
	if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF)
		plat_arm_interconnect_enter_coherency();
}

/*******************************************************************************
 * Handler called when a power level has just been powered on after
 * being turned off earlier. The target_state encodes the low power state that
 * each level has woken up from. This handler would never be invoked with
 * the system power domain uninitialized, as either the primary CPU would have
 * taken care of it as part of cold boot or the first core awakened from system
 * suspend would have already initialized it.
 ******************************************************************************/
void css_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* Assert that the system power domain need not be initialized */
	assert(css_system_pwr_state(target_state) == ARM_LOCAL_STATE_RUN);

	css_pwr_domain_on_finisher_common(target_state);
}

/*******************************************************************************
 * Handler called when a power domain has just been powered on and the CPU
 * and its cluster are fully participating in coherent transactions on the
 * interconnect. The data cache must be enabled for the CPU at this point.
 ******************************************************************************/
void css_pwr_domain_on_finish_late(const psci_power_state_t *target_state)
{
	/* Program the gic per-cpu distributor or re-distributor interface */
	plat_arm_gic_pcpu_init();

	/* Enable the gic cpu interface */
	plat_arm_gic_cpuif_enable();

	/* Setup the CPU power down request interrupt for secondary core(s) */
	css_setup_cpu_pwr_down_intr();
}

/*******************************************************************************
 * Common function called when turning a CPU off or suspending it. It is called
 * from css_pwr_domain_off() or css_pwr_domain_suspend() when these functions
 * are in turn called for the power domain at the highest power level that will
 * be powered down. It performs the actions common to the OFF and SUSPEND calls.
 ******************************************************************************/
static void css_power_down_common(const psci_power_state_t *target_state)
{
	/* Prevent interrupts from spuriously waking up this cpu */
	plat_arm_gic_cpuif_disable();

	/* Cluster is to be turned off, so disable coherency */
	if (CSS_CLUSTER_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF) {
		plat_arm_interconnect_exit_coherency();

#if HW_ASSISTED_COHERENCY
		uint32_t reg;

		/*
		 * If we have determined this core to be the last man standing
		 * and we intend to power down the cluster proactively, we
		 * provide a hint to the power controller that cluster power is
		 * not required when all cores are powered down.
		 * Note that this is only advisory to the power controller and
		 * is supported only by SoCs with DynamIQ Shared Units.
		 */
		reg = read_clusterpwrdn();

		/* Clear and set bit 0: cluster power not required */
		reg &= ~DSU_CLUSTER_PWR_MASK;
		reg |= DSU_CLUSTER_PWR_OFF;
		write_clusterpwrdn(reg);
#endif
	}
}

/*******************************************************************************
 * Handler called when a power domain is about to be turned off. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_off(const psci_power_state_t *target_state)
{
	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
	css_power_down_common(target_state);
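	/* Hand the power-down request for this core over to the SCP */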
	css_scp_off(target_state);
}

/*******************************************************************************
 * Handler called when a power domain is about to be suspended. The
 * target_state encodes the power state that each level should transition to.
 ******************************************************************************/
void css_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	/*
	 * CSS currently supports retention only at cpu level. Just return
	 * as nothing is to be done for retention.
	 */
	if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
		return;

	assert(CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_OFF);
	css_power_down_common(target_state);

	/* Perform system domain state saving if issuing system suspend */
	if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF) {
		arm_system_pwr_domain_save();

		/* Power off the Redistributor after having saved its context */
		plat_arm_gic_redistif_off();
	}

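	/* Hand the suspend request, including the target state, to the SCP */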
	css_scp_suspend(target_state);
}

/*******************************************************************************
 * Handler called when a power domain has just been powered on after
 * having been suspended earlier. The target_state encodes the low power state
 * that each level has woken up from.
 * TODO: At the moment we reuse the on finisher and reinitialize the secure
 * context. Need to implement a separate suspend finisher.
 ******************************************************************************/
void css_pwr_domain_suspend_finish(
				const psci_power_state_t *target_state)
{
	/* Return as nothing is to be done on waking up from retention. */
	if (CSS_CORE_PWR_STATE(target_state) == ARM_LOCAL_STATE_RET)
		return;

	/* Perform system domain restore if woken up from system suspend */
	if (css_system_pwr_state(target_state) == ARM_LOCAL_STATE_OFF)
		/*
		 * At this point, the Distributor must be powered on to be ready
		 * to have its state restored. The Redistributor will be powered
		 * on as part of gicv3_rdistif_init_restore.
		 */
		arm_system_pwr_domain_resume();

	css_pwr_domain_on_finisher_common(target_state);

	/* Enable the gic cpu interface */
	plat_arm_gic_cpuif_enable();
}

/*******************************************************************************
 * Handlers to shutdown/reboot the system
 ******************************************************************************/
void __dead2 css_system_off(void)
{
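	/* Request the SCP to shut the system down; this call does not return */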
	css_scp_sys_shutdown();
}

void __dead2 css_system_reset(void)
{
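	/* Request the SCP to reset the system; this call does not return */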
	css_scp_sys_reboot();
}

/*******************************************************************************
 * Handler called when the CPU power domain is about to enter standby.
 ******************************************************************************/
void css_cpu_standby(plat_local_state_t cpu_state)
{
	unsigned int scr;

	assert(cpu_state == ARM_LOCAL_STATE_RET);

	scr = read_scr_el3();
	/*
	 * Enable the Non-secure interrupt to wake the CPU.
	 * In GICv3 affinity routing mode, the Non-secure Group 1 interrupts
	 * use the Physical FIQ at EL3, whereas in GICv2 they use the Physical
	 * IRQ. Enabling both bits works for both GICv2 mode and GICv3 affinity
	 * routing mode.
	 */
	write_scr_el3(scr | SCR_IRQ_BIT | SCR_FIQ_BIT);
	isb();
	dsb();
	wfi();

	/*
	 * Restore SCR_EL3 to its original value. Synchronisation of scr_el3 is
	 * done by the eret in el3_exit, to save some execution cycles.
	 */
	write_scr_el3(scr);
}

/*******************************************************************************
 * Handler called to return the 'req_state' for system suspend.
 ******************************************************************************/
void css_get_sys_suspend_power_state(psci_power_state_t *req_state)
{
	unsigned int i;

	/*
	 * System Suspend is supported only if the system power domain node
	 * is implemented.
	 */
	assert(PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL);

	for (i = ARM_PWR_LVL0; i <= PLAT_MAX_PWR_LVL; i++)
		req_state->pwr_domain_state[i] = ARM_LOCAL_STATE_OFF;
}

/*******************************************************************************
 * Handler to query CPU/cluster power states from SCP
 ******************************************************************************/
int css_node_hw_state(u_register_t mpidr, unsigned int power_level)
{
	return css_scp_get_power_state(mpidr, power_level);
}

/*
 * The system power domain suspend is only supported via the PSCI
 * SYSTEM_SUSPEND API. A PSCI CPU_SUSPEND request targeting the system power
 * domain level will be downgraded to the lower level.
 */
static int css_validate_power_state(unsigned int power_state,
			    psci_power_state_t *req_state)
{
	int rc;
	rc = arm_validate_power_state(power_state, req_state);

	/*
	 * Ensure that we don't overrun the pwr_domain_state array in the case
	 * where the platform supported max power level is less than the system
	 * power level.
	 */

#if (PLAT_MAX_PWR_LVL == CSS_SYSTEM_PWR_DMN_LVL)

	/*
	 * Ensure that the system power domain level is never suspended
	 * via PSCI CPU SUSPEND API. Currently system suspend is only
	 * supported via PSCI SYSTEM SUSPEND API.
	 */

	req_state->pwr_domain_state[CSS_SYSTEM_PWR_DMN_LVL] =
							ARM_LOCAL_STATE_RUN;
#endif

	return rc;
}

/*
 * Custom `translate_power_state_by_mpidr` handler for CSS. Unlike
 * `css_validate_power_state`, we do not downgrade the system power
 * domain level request in `power_state`, as it will be used to query
 * PSCI_STAT_COUNT/RESIDENCY at the system power domain level.
 */
static int css_translate_power_state_by_mpidr(u_register_t mpidr,
		unsigned int power_state,
		psci_power_state_t *output_state)
{
	return arm_validate_power_state(power_state, output_state);
}

/*
 * Set up the SGI that will be used to trigger the execution of the power-down
 * sequence for all the secondary cores. This interrupt is configured to be
 * handled in EL3 context at a priority defined by the platform.
 */
void css_setup_cpu_pwr_down_intr(void)
{
#if CSS_SYSTEM_GRACEFUL_RESET
	plat_ic_set_interrupt_type(CSS_CPU_PWR_DOWN_REQ_INTR, INTR_TYPE_EL3);
	plat_ic_set_interrupt_priority(CSS_CPU_PWR_DOWN_REQ_INTR,
			PLAT_REBOOT_PRI);
	plat_ic_enable_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR);
#endif
}

/*
 * For a graceful shutdown/reboot, each CPU in the system should run its own
 * power-down sequence. On a PSCI shutdown/reboot request, only one CPU gets
 * the opportunity to run that sequence. To achieve a graceful reset of all
 * cores in the system, that CPU raises the warm reboot SGI to the rest of the
 * online CPUs. This is the handler for the reboot SGI, in which the remaining
 * CPUs execute their power-down sequence.
 */
int css_reboot_interrupt_handler(uint32_t intr_raw, uint32_t flags,
		void *handle, void *cookie)
{
	assert(intr_raw == CSS_CPU_PWR_DOWN_REQ_INTR);

	/* Deactivate warm reboot SGI */
	plat_ic_end_of_interrupt(CSS_CPU_PWR_DOWN_REQ_INTR);

	/*
	 * Disable GIC CPU interface to prevent pending interrupt from waking
	 * up the AP from WFI.
	 */
	plat_arm_gic_cpuif_disable();
	plat_arm_gic_redistif_off();

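	/* Run the power-down sequence (CPU handlers and cache flush) for this core */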
	psci_pwrdown_cpu(PLAT_MAX_PWR_LVL);

	dmbsy();

	wfi();
	return 0;
}

/*******************************************************************************
 * Export the platform handlers via plat_arm_psci_pm_ops. The ARM Standard
 * platform will take care of registering the handlers with PSCI.
 ******************************************************************************/
plat_psci_ops_t plat_arm_psci_pm_ops = {
	.pwr_domain_on		= css_pwr_domain_on,
	.pwr_domain_on_finish	= css_pwr_domain_on_finish,
	.pwr_domain_on_finish_late = css_pwr_domain_on_finish_late,
	.pwr_domain_off		= css_pwr_domain_off,
	.cpu_standby		= css_cpu_standby,
	.pwr_domain_suspend	= css_pwr_domain_suspend,
	.pwr_domain_suspend_finish	= css_pwr_domain_suspend_finish,
	.system_off		= css_system_off,
	.system_reset		= css_system_reset,
	.validate_power_state	= css_validate_power_state,
	.validate_ns_entrypoint = arm_validate_psci_entrypoint,
	.translate_power_state_by_mpidr = css_translate_power_state_by_mpidr,
	.get_node_hw_state	= css_node_hw_state,
	.get_sys_suspend_power_state = css_get_sys_suspend_power_state,

#if defined(PLAT_ARM_MEM_PROT_ADDR)
	.mem_protect_chk	= arm_psci_mem_protect_chk,
	.read_mem_protect	= arm_psci_read_mem_protect,
	.write_mem_protect	= arm_nor_psci_write_mem_protect,
#endif
#if CSS_USE_SCMI_SDS_DRIVER
	.system_reset2		= css_system_reset2,
#endif
};