xref: /rk3399_ARM-atf/lib/psci/psci_suspend.c (revision 1862d6203cb21d1846388e8d7530612a9b98786e)
/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
#include <pmf.h>
#include <runtime_instr.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * This function does generic and platform specific operations after a wake-up
 * from standby/retention states at multiple power levels.
 ******************************************************************************/
static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
					     unsigned int end_pwrlvl)
{
	psci_power_state_t state_info;

	psci_acquire_pwr_domain_locks(end_pwrlvl,
				cpu_idx);

	/*
	 * Find out which retention states this CPU has exited from, up to
	 * 'end_pwrlvl'. The exit retention state could be deeper than the entry
	 * state as a result of state coordination amongst other CPUs after wfi.
	 */
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

	/*
	 * Plat. management: Allow the platform to do operations
	 * on waking up from retention.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

	psci_release_pwr_domain_locks(end_pwrlvl,
				cpu_idx);
}
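
/*
 * Illustrative sketch (not part of the upstream file): the standby finisher
 * above is only reached when the platform reported pure retention states for
 * the suspended levels. A minimal, hypothetical 'validate_power_state' hook
 * that would steer a CPU_SUSPEND request down this path could look roughly
 * as follows; the state-id decoding is platform specific and elided here:
 *
 *	static int plat_validate_power_state(unsigned int power_state,
 *					     psci_power_state_t *req_state)
 *	{
 *		// Report a CPU-level retention state only (illustrative).
 *		req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] =
 *						PLAT_MAX_RET_STATE;
 *		return PSCI_E_SUCCESS;
 *	}
 */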

/*******************************************************************************
 * This function does generic and platform specific suspend to power down
 * operations.
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
					  entry_point_info_t *ep,
					  psci_power_state_t *state_info)
{
	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it might be accessed on power up with
	 * the data cache disabled.
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters an
	 * error, it's expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend)
		psci_spd_pm->svc_suspend(max_off_lvl);

#if !HW_ASSISTED_COHERENCY
	/*
	 * Plat. management: Allow the platform to perform any early
	 * actions required to power down the CPU. This might be useful for
	 * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
	 * actions with data caches enabled.
	 */
	if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early)
		psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
#endif
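
	/*
	 * Illustrative sketch (not part of the upstream file): a platform
	 * needing to act while the data cache is still enabled could hook in
	 * here, for example (the helper below is hypothetical):
	 *
	 *	static void plat_suspend_pwrdown_early(
	 *				const psci_power_state_t *target_state)
	 *	{
	 *		if (is_local_state_off(target_state->
	 *				pwr_domain_state[PLAT_MAX_PWR_LVL]))
	 *			plat_pmu_prepare_deep_sleep();
	 *	}
	 */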

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_my_context(ep);

#if ENABLE_RUNTIME_INSTRUMENTATION

	/*
	 * Flush cache line so that even if CPU power down happens
	 * the timestamp update is reflected in memory.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_ENTER_CFLUSH,
		PMF_CACHE_MAINT);
#endif

	/*
	 * Arch. management. Initiate power down sequence.
	 * TODO : Introduce a mechanism to query the cache level to flush
	 * and the cpu-ops power down to perform from the platform.
	 */
	psci_do_pwrdown_sequence(max_off_lvl);

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		RT_INSTR_EXIT_CFLUSH,
		PMF_NO_CACHE_MAINT);
#endif
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels up to the target power level will be suspended as well. It
 * coordinates with the platform to negotiate the target state for each power
 * domain level up to the target power domain level. It then performs the
 * generic, architectural and platform setup and state management required to
 * suspend that power domain level and the power domain levels below it.
 * e.g. For a cpu that is to be suspended, it could mean programming the
 * power controller, whereas for a cluster that is to be suspended, it will
 * call the platform specific code which will disable coherency at the
 * interconnect level if the cpu is the last in the cluster and also program
 * the power controller.
 *
 * All the required parameter checks are performed at the beginning. After
 * the state transition has been done, no further error is expected and it is
 * not possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
void psci_cpu_suspend_start(entry_point_info_t *ep,
			    unsigned int end_pwrlvl,
			    psci_power_state_t *state_info,
			    unsigned int is_power_down_state)
{
	int skip_wfi = 0;
	unsigned int idx = plat_my_core_pos();

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_suspend &&
			psci_plat_pm_ops->pwr_domain_suspend_finish);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, a consistent snapshot
	 * of the system topology is held and state management can be done
	 * safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1()) {
		skip_wfi = 1;
		goto exit;
	}

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif

	if (is_power_down_state)
		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_suspend(state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				  idx);
	if (skip_wfi)
		return;

	if (is_power_down_state) {
#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off.  We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		/* The function calls below must not return */
		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
		else
			psci_power_down_wfi();
	}

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_ENTER_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

	/*
	 * We will reach here if only retention/standby states have been
	 * requested at multiple power levels. This means that the cpu
	 * context will be preserved.
	 */
	wfi();

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(state_info);
	psci_stats_update_pwr_up(end_pwrlvl, state_info);
#endif

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_EXIT_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * After we wake up from context retaining suspend, call the
	 * context retaining suspend finisher.
	 */
	psci_suspend_to_standby_finisher(idx, end_pwrlvl);
}
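
/*
 * Illustrative sketch (not part of the upstream file): psci_cpu_suspend_start()
 * asserts that the CPU_SUSPEND hooks exist, which a platform port would
 * normally register through plat_setup_psci_ops(). A rough outline, with
 * hypothetical handler names, might be:
 *
 *	static const plat_psci_ops_t plat_psci_pm_ops = {
 *		.validate_power_state		= plat_validate_power_state,
 *		.pwr_domain_suspend		= plat_pwr_domain_suspend,
 *		.pwr_domain_suspend_finish	= plat_pwr_domain_suspend_finish,
 *		.pwr_domain_pwr_down_wfi	= plat_pwr_down_wfi,	// optional
 *	};
 *
 *	int plat_setup_psci_ops(uintptr_t sec_entrypoint,
 *				const plat_psci_ops_t **psci_ops)
 *	{
 *		*psci_ops = &plat_psci_pm_ops;
 *		return 0;
 *	}
 */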

/*******************************************************************************
 * The following functions finish an earlier suspend request. They
 * are called by the common finisher routine in psci_common.c. The `state_info`
 * is the psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_suspend_finish(unsigned int cpu_idx,
			     psci_power_state_t *state_info)
{
	unsigned int counter_freq;
	unsigned int max_off_lvl;

	/* Ensure we have been woken up from a suspended state */
	assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(
			state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Arch. management: Enable the data cache, stack memory maintenance. */
	psci_do_pwrup_cache_maintenance();
#endif

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters an
	 * error, it's expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend_finish) {
		max_off_lvl = psci_find_max_off_lvl(state_info);
		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* Invalidate the suspend level for the cpu */
	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}
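
/*
 * Illustrative sketch (not part of the upstream file): the svc_suspend and
 * svc_suspend_finish callbacks used in this file are registered by a Secure
 * Payload Dispatcher, roughly as below; handler names are hypothetical and
 * the remaining spd_pm_ops_t members are elided:
 *
 *	static const spd_pm_ops_t spd_pm = {
 *		.svc_suspend		= spd_cpu_suspend_handler,
 *		.svc_suspend_finish	= spd_cpu_suspend_finish_handler,
 *	};
 *
 *	// Typically done once during the SPD setup/init hook:
 *	psci_register_spd_pm_hook(&spd_pm);
 */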