xref: /rk3399_ARM-atf/lib/psci/psci_suspend.c (revision 66b4542a5fa465edda55a4a7862ed1be7b99b02e)
/*
 * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <bl_common.h>
#include <arch.h>
#include <arch_helpers.h>
#include <context.h>
#include <context_mgmt.h>
#include <cpu_data.h>
#include <debug.h>
#include <platform.h>
#include <pmf.h>
#include <runtime_instr.h>
#include <stddef.h>
#include "psci_private.h"

/*******************************************************************************
 * This function does generic and platform specific operations after a wake-up
 * from standby/retention states at multiple power levels.
 ******************************************************************************/
static void psci_suspend_to_standby_finisher(unsigned int cpu_idx,
					     unsigned int end_pwrlvl)
{
	psci_power_state_t state_info;

	psci_acquire_pwr_domain_locks(end_pwrlvl,
				cpu_idx);

	/*
	 * Find out which retention states this CPU has exited, up to the
	 * 'end_pwrlvl'. The exit retention state could be deeper than the entry
	 * state as a result of state coordination amongst other CPUs post wfi.
	 */
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

	/*
	 * Plat. management: Allow the platform to do operations
	 * on waking up from retention.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(&state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

	psci_release_pwr_domain_locks(end_pwrlvl,
				cpu_idx);
}

/*******************************************************************************
 * This function does generic and platform specific suspend to power down
 * operations.
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int end_pwrlvl,
					  entry_point_info_t *ep,
					  psci_power_state_t *state_info)
{
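	/*
	 * 'max_off_lvl' is the deepest power level that will actually enter an
	 * OFF state, derived from the coordinated 'state_info'.
	 */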
	unsigned int max_off_lvl = psci_find_max_off_lvl(state_info);

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it will be accessed on power up with
	 * the data cache disabled.
	 */
	flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters an
	 * error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend)
		psci_spd_pm->svc_suspend(max_off_lvl);

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_my_context(ep);

	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches. Currently we assume that the power level corresponds
	 * to the cache level.
	 * TODO : Introduce a mechanism to query the cache level to flush
	 * and the cpu-ops power down to perform from the platform.
	 */
	psci_do_pwrdown_cache_maintenance(max_off_lvl);
}

/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels up to the target power level will be suspended as well. It
 * coordinates with the platform to negotiate the target state for each power
 * domain level up to the target power domain level. It then performs the
 * generic, architectural, platform setup and state management required to
 * suspend that power domain level and the power domain levels below it.
 * e.g. For a cpu that is to be suspended, this could mean programming the
 * power controller, whereas for a cluster that is to be suspended, it will
 * call the platform specific code which will disable coherency at the
 * interconnect level if the cpu is the last one in the cluster and will also
 * program the power controller.
 *
 * All the required parameter checks are performed at the beginning. After the
 * state transition has been done, no further error is expected and it is not
 * possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
void psci_cpu_suspend_start(entry_point_info_t *ep,
			    unsigned int end_pwrlvl,
			    psci_power_state_t *state_info,
			    unsigned int is_power_down_state)
{
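	/*
	 * 'skip_wfi' is set when a wake-up interrupt is found to be already
	 * pending, in which case the suspend request is abandoned. 'idx' is
	 * this CPU's linear index in the topology, used to identify the power
	 * domain locks it must take.
	 */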
	int skip_wfi = 0;
	unsigned int idx = plat_my_core_pos();

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_suspend &&
			psci_plat_pm_ops->pwr_domain_suspend_finish);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, the system topology
	 * has been snapshotted and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1()) {
		skip_wfi = 1;
		goto exit;
	}

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, state_info);
#endif

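	/*
	 * For power down states, perform the additional book-keeping (SPD
	 * notification, non-secure re-entry context initialisation and cache
	 * maintenance) needed before this CPU's context is lost.
	 */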
	if (is_power_down_state)
		psci_suspend_to_pwrdown_start(end_pwrlvl, ep, state_info);

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_suspend(state_info);

#if ENABLE_PSCI_STAT
	/*
	 * Capture time-stamp while entering low power state.
	 * No cache maintenance needed because caches are off
	 * and writes are direct to main memory.
	 */
	PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
		PMF_NO_CACHE_MAINT);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				  idx);
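	/*
	 * If a wake-up interrupt was already pending, the suspend request is
	 * abandoned here and the caller resumes execution without entering
	 * wfi.
	 */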
	if (skip_wfi)
		return;

	if (is_power_down_state) {
#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off. We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		/* The function calls below must not return */
		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi)
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(state_info);
		else
			psci_power_down_wfi();
	}

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_ENTER_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * We will reach here if only retention/standby states have been
	 * requested at multiple power levels. This means that the cpu
	 * context will be preserved.
	 */
	wfi();

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_EXIT_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	/*
	 * After we wake up from context retaining suspend, call the
	 * context retaining suspend finisher.
	 */
	psci_suspend_to_standby_finisher(idx, end_pwrlvl);
}

/*******************************************************************************
 * The following function finishes an earlier suspend request. It is called by
 * the common finisher routine in psci_common.c. The `state_info` is the
 * psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_suspend_finish(unsigned int cpu_idx,
			     psci_power_state_t *state_info)
{
	unsigned int counter_freq;
	unsigned int max_off_lvl;

	/* Ensure we have been woken up from a suspended state */
	assert(psci_get_aff_info_state() == AFF_STATE_ON && is_local_state_off(
			state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]));

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

	/*
	 * Arch. management: Enable the data cache, manage stack memory and
	 * restore the stashed EL3 architectural context from the 'cpu_context'
	 * structure for this cpu.
	 */
	psci_do_pwrup_cache_maintenance();

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
	 * error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend) {
		max_off_lvl = psci_find_max_off_lvl(state_info);
		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* Invalidate the suspend level for the cpu */
	psci_set_suspend_pwrlvl(PSCI_INVALID_PWR_LVL);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);
}
319