xref: /rk3399_ARM-atf/lib/psci/psci_suspend.c (revision bfc87a8dff75688f3f0ef558f4921c4b1acc07b1)
/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
#include <pmf.h>
#include <runtime_instr.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * This function does generic and platform specific operations after a wake-up
 * from standby/retention states at multiple power levels.
 ******************************************************************************/
static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
					     unsigned int end_pwrlvl)
{
	psci_power_state_t state_info;

	psci_acquire_pwr_domain_locks(end_pwrlvl, cpu_idx);

	/*
	 * Find out which retention states this CPU has exited, up to
	 * 'end_pwrlvl'. The exit retention state could be deeper than the
	 * entry state as a result of state coordination amongst other CPUs
	 * after wfi.
	 */
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

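	/*
	 * Close the residency accounting interval that was opened in
	 * psci_cpu_suspend_start() before the wfi, and credit the time to
	 * the power levels recorded in 'state_info'.
	 */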
#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(&state_info);
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

	/*
	 * Plat. management: Allow the platform to do operations
	 * on waking up from retention.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

	psci_release_pwr_domain_locks(end_pwrlvl, cpu_idx);
}

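/*
 * Illustrative sketch only: a platform's pwr_domain_suspend_finish hook, as
 * invoked from the finisher above, commonly returns early when only a
 * retention state was entered (CPU context was preserved) and otherwise
 * restores per-CPU resources such as the GIC CPU interface. The helper names
 * my_plat_gic_cpuif_enable() and my_plat_restore_local_timer() are
 * hypothetical placeholders, not real ATF APIs:
 *
 *	static void my_plat_pwr_domain_suspend_finish(
 *				const psci_power_state_t *target_state)
 *	{
 *		if (is_local_state_retn(
 *			target_state->pwr_domain_state[PSCI_CPU_PWR_LVL]))
 *			return;
 *
 *		my_plat_gic_cpuif_enable();
 *		my_plat_restore_local_timer();
 *	}
 */
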
/*******************************************************************************
 * This function does generic and platform specific suspend to power down
 * operations.
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
					  entry_point_info_t *ep,
					  psci_power_state_t *state_info)
{
	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it might be accessed on power up
	 * with the data cache disabled.
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters
	 * an error, it is expected to assert from within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend)
		psci_spd_pm->svc_suspend(max_off_lvl);

#if !HW_ASSISTED_COHERENCY
	/*
	 * Plat. management: Allow the platform to perform any early
	 * actions required to power down the CPU. This might be useful for
	 * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
	 * actions with data caches enabled.
	 */
	if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early)
		psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
#endif

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_my_context(ep);

#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush the cache line so that the timestamp update is reflected in
	 * memory even if the CPU powers down.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management. Initiate the power down sequence.
	 * TODO: Introduce a mechanism to query the platform for the cache
	 * level to flush and the cpu-ops power down handler to use.
	 */
	psci_do_pwrdown_sequence(max_off_lvl);

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif
}

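/*
 * Illustrative sketch only: the optional pwr_domain_suspend_pwrdown_early
 * hook used above lets a platform that does not have hardware-assisted
 * coherency do work that is only safe while the data cache is still on,
 * e.g. updating shared book-keeping before the cache flush performed by
 * psci_do_pwrdown_sequence(). The names MY_PLAT_CLUSTER_LVL and
 * my_plat_note_cluster_going_down() are hypothetical placeholders:
 *
 *	static void my_plat_pwr_domain_suspend_pwrdown_early(
 *				const psci_power_state_t *target_state)
 *	{
 *		if (is_local_state_off(
 *			target_state->pwr_domain_state[MY_PLAT_CLUSTER_LVL]))
 *			my_plat_note_cluster_going_down(plat_my_core_pos());
 *	}
 */
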
/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels, up to the target power level, will be suspended as well.
 * It coordinates with the platform to negotiate the target state for each
 * power domain level up to the target power domain level. It then performs
 * the generic, architectural and platform setup and state management required
 * to suspend that power domain level and the power domain levels below it.
 * e.g. For a cpu that is to be suspended, this could mean programming the
 * power controller, whereas for a cluster that is to be suspended, it will
 * call the platform specific code which will disable coherency at the
 * interconnect level if the cpu is the last in the cluster and also program
 * the power controller.
 *
 * All the required parameter checks are performed at the beginning. Once the
 * state transition has been done, no further error is expected and it is not
 * possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
void psci_cpu_suspend_start(entry_point_info_t *ep,
			    unsigned int end_pwrlvl,
			    psci_power_state_t *state_info,
			    unsigned int is_power_down_state)
{
	int skip_wfi = 0;
	unsigned int idx = plat_my_core_pos();

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_suspend &&
			psci_plat_pm_ops->pwr_domain_suspend_finish);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, a snapshot of the
	 * system topology has been made and state management can be done
	 * safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, idx);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1()) {
		skip_wfi = 1;
		goto exit;
	}

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level up to end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif

	if (is_power_down_state)
		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_suspend(state_info);

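	/*
	 * Start the residency accounting for the state being entered below;
	 * the matching 'stop' is performed on the wake-up path.
	 */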
#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, idx);
	if (skip_wfi)
		return;

	if (is_power_down_state) {
#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off. We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before returning to
		 * the normal world on wake-up.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		/* The function calls below must not return */
		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
		else
			psci_power_down_wfi();
	}

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_ENTER_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * We will reach here if only retention/standby states have been
	 * requested at multiple power levels. This means that the cpu
	 * context will be preserved.
	 */
	wfi();

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_EXIT_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * After we wake up from a context-retaining suspend, call the
	 * context-retaining suspend finisher.
	 */
	psci_suspend_to_standby_finisher(idx, end_pwrlvl);
}
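
/*
 * Illustrative sketch only: a minimal platform pwr_domain_suspend hook, as
 * called from psci_cpu_suspend_start() above, typically points the warm-boot
 * mailbox at the PSCI entry point and arms the power controller with the
 * negotiated per-level states. Every my_plat_* name below is a hypothetical
 * placeholder, not a real ATF API:
 *
 *	static void my_plat_pwr_domain_suspend(
 *				const psci_power_state_t *target_state)
 *	{
 *		unsigned int cpu = plat_my_core_pos();
 *
 *		my_plat_set_warmboot_mailbox(cpu, my_plat_warmboot_entrypoint);
 *		my_plat_gic_cpuif_disable();
 *		my_plat_pwrc_program_suspend(cpu,
 *			target_state->pwr_domain_state[PSCI_CPU_PWR_LVL]);
 *	}
 */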

/*******************************************************************************
 * The following functions finish an earlier suspend request. They
 * are called by the common finisher routine in psci_common.c. The `state_info`
 * is the psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_suspend_finish(unsigned int cpu_idx,
			     psci_power_state_t *state_info)
{
	unsigned int counter_freq;
	unsigned int max_off_lvl;

	/* Ensure we have been woken up from a suspended state */
	assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(
			state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

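	/*
	 * Note: with hardware-assisted coherency, or when the data cache is
	 * enabled early in the warm boot path, the data cache is already on
	 * by the time this finisher runs, so the explicit cache/stack
	 * maintenance below is not needed and is compiled out.
	 */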
#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Arch. management: Enable the data cache, stack memory maintenance. */
	psci_do_pwrup_cache_maintenance();
#endif

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters
	 * an error, it is expected to assert from within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend_finish) {
		max_off_lvl = psci_find_max_off_lvl(state_info);
		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* Invalidate the suspend level for the cpu */
	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}