/*
 * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <stddef.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/el3_runtime/cpu_data.h>
#include <lib/el3_runtime/pubsub_events.h>
#include <lib/pmf/pmf.h>
#include <lib/runtime_instr.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*******************************************************************************
 * This function does generic and platform specific operations after a wake-up
 * from standby/retention states at multiple power levels.
 ******************************************************************************/
static void psci_cpu_suspend_to_standby_finish(unsigned int end_pwrlvl,
					     psci_power_state_t *state_info)
{
	/*
	 * Plat. management: Allow the platform to do operations
	 * on waking up from retention.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

	/* This loses its meaning when not suspending, reset so it's correct for OFF */
	psci_set_suspend_pwrlvl(PLAT_MAX_PWR_LVL);
}
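
/*
 * For orientation only: a minimal sketch of the platform hook invoked above.
 * "plat_foo" and its helpers are hypothetical names, not part of this tree;
 * a real hook undoes whatever was set up when entering the low-power state,
 * e.g. clearing a wake-up mailbox or restoring clocks:
 *
 *	static void plat_foo_pwr_domain_suspend_finish(
 *			const psci_power_state_t *target_state)
 *	{
 *		plat_foo_clear_wakeup_mailbox();
 *		plat_foo_restore_cpu_clocks();
 *	}
 */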

/*******************************************************************************
 * This function does generic and platform specific suspend to power down
 * operations.
 ******************************************************************************/
static void psci_suspend_to_pwrdown_start(unsigned int idx,
					  unsigned int end_pwrlvl,
					  unsigned int max_off_lvl,
					  const entry_point_info_t *ep,
					  const psci_power_state_t *state_info)
{
	PUBLISH_EVENT_ARG(psci_suspend_pwrdown_start, &idx);

#if PSCI_OS_INIT_MODE
#ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL
	end_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL;
#else
	end_pwrlvl = PLAT_MAX_PWR_LVL;
#endif
#endif
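
	/*
	 * Illustrative only: a platform opts in to capping this level by
	 * defining PLAT_MAX_CPU_SUSPEND_PWR_LVL in its platform_def.h, e.g.
	 * (hypothetical value):
	 *
	 *	#define PLAT_MAX_CPU_SUSPEND_PWR_LVL	MPIDR_AFFLVL1
	 */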

	/* Save PSCI target power level for the suspend finisher handler */
	psci_set_suspend_pwrlvl(end_pwrlvl);

	/*
	 * Flush the target power level as it might be accessed on power up with
	 * Data cache disabled.
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.target_pwrlvl);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters an
	 * error, it's expected to assert within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend != NULL))
		psci_spd_pm->svc_suspend(max_off_lvl);

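	/*
	 * For reference, an SPD registers these hooks via
	 * psci_register_spd_pm_hook(). A minimal sketch, assuming a
	 * hypothetical "foospd" that only implements the suspend hooks:
	 *
	 *	static const spd_pm_ops_t foospd_pm = {
	 *		.svc_suspend = foospd_cpu_suspend_handler,
	 *		.svc_suspend_finish = foospd_cpu_suspend_finish_handler,
	 *	};
	 *
	 *	psci_register_spd_pm_hook(&foospd_pm);
	 */
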
#if !HW_ASSISTED_COHERENCY
	/*
	 * Plat. management: Allow the platform to perform any early
	 * actions required to power down the CPU. This might be useful for
	 * HW_ASSISTED_COHERENCY = 0 platforms that can safely perform these
	 * actions with data caches enabled.
	 */
	if (psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early != NULL)
		psci_plat_pm_ops->pwr_domain_suspend_pwrdown_early(state_info);
#endif

	/*
	 * Store the re-entry information for the non-secure world.
	 */
	cm_init_my_context(ep);

	/*
	 * Arch. management. Initiate power down sequence.
	 */
	psci_pwrdown_cpu_start(max_off_lvl);
}

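/*
 * Call-flow note (informational): this routine is reached from the PSCI SMC
 * layer (psci_main.c), where psci_cpu_suspend() validates the caller's
 * power_state (via the platform's validate_power_state hook) and, for
 * power-down states, the entry point, before calling in here.
 */
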
/*******************************************************************************
 * Top level handler which is called when a cpu wants to suspend its execution.
 * It is assumed that along with suspending the cpu power domain, power domains
 * at higher levels until the target power level will be suspended as well. It
 * coordinates with the platform to negotiate the target state for each power
 * domain level up to the target power domain level. It then performs generic,
 * architectural, platform setup and state management required to suspend that
 * power domain level and the power domain levels below it.
 * e.g. For a cpu that's to be suspended, it could mean programming the
 * power controller, whereas for a cluster that's to be suspended, it will call
 * the platform specific code which will disable coherency at the interconnect
 * level if the cpu is the last in the cluster, and also program the power
 * controller.
 *
 * All the required parameter checks are performed at the beginning and, after
 * the state transition has been done, no further error is expected and it is
 * not possible to undo any of the actions taken beyond that point.
 ******************************************************************************/
int psci_cpu_suspend_start(unsigned int idx,
			   const entry_point_info_t *ep,
			   unsigned int end_pwrlvl,
			   psci_power_state_t *state_info,
			   unsigned int is_power_down_state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
	unsigned int max_off_lvl = 0;
#if FEAT_PABANDON
	cpu_context_t *ctx = cm_get_context(NON_SECURE);
	cpu_context_t old_ctx;
#endif

	/*
	 * This function must only be called on platforms where the
	 * CPU_SUSPEND platform hooks have been implemented.
	 */
	assert((psci_plat_pm_ops->pwr_domain_suspend != NULL) &&
	       (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL));

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(idx, end_pwrlvl, parent_nodes);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, the system topology
	 * is snapshot and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/*
	 * We check if there are any pending interrupts after the delay
	 * introduced by lock contention to increase the chances of early
	 * detection that a wake-up interrupt has fired.
	 */
	if (read_isr_el1() != 0U) {
		goto exit;
	}

#if PSCI_OS_INIT_MODE
	if (psci_suspend_mode == OS_INIT) {
		/*
		 * This function validates the requested state info for
		 * OS-initiated mode.
		 */
		rc = psci_validate_state_coordination(idx, end_pwrlvl, state_info);
		if (rc != PSCI_E_SUCCESS) {
			goto exit;
		}
	} else {
#endif
		/*
		 * This function is passed the requested state info and
		 * it returns the negotiated state info for each power level up
		 * to the end level specified.
		 */
		psci_do_state_coordination(idx, end_pwrlvl, state_info);
#if PSCI_OS_INIT_MODE
	}
#endif
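
	/*
	 * Worked example (platform-coordinated mode): on a two-cluster system
	 * with PLAT_MAX_PWR_LVL == 1, suppose this CPU requests
	 * { cpu = OFF, cluster = OFF } while a sibling CPU in the same
	 * cluster is still running, i.e. implicitly requesting RUN at the
	 * cluster level. Coordination keeps the shallowest request at each
	 * shared level, so the negotiated state_info here becomes
	 * { cpu = OFF, cluster = RUN }.
	 */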

#if PSCI_OS_INIT_MODE
	if (psci_plat_pm_ops->pwr_domain_validate_suspend != NULL) {
		rc = psci_plat_pm_ops->pwr_domain_validate_suspend(state_info);
		if (rc != PSCI_E_SUCCESS) {
			goto exit;
		}
	}
#endif

	/* Update the target state in the power domain nodes */
	psci_set_target_local_pwr_states(idx, end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(idx, end_pwrlvl, state_info);
#endif

	if (is_power_down_state != 0U) {
		/*
		 * When CTX_INCLUDE_EL2_REGS is unset, we're probably running
		 * with some SPD that assumes the core is going off so it
		 * doesn't bother saving NS's context. Do that here until we
		 * figure out a way to make this coherent.
		 */
#if FEAT_PABANDON
#if !CTX_INCLUDE_EL2_REGS
		cm_el1_sysregs_context_save(NON_SECURE);
#endif
		/*
		 * When the core wakes it expects its context to already be in
		 * place, so we must overwrite it before powerdown. But if
		 * powerdown never happens we want the old context. Save it in
		 * case we wake up. EL2/EL1 will not be touched by PSCI so don't
		 * copy.
		 */
		memcpy(&old_ctx.gpregs_ctx, &ctx->gpregs_ctx, sizeof(gp_regs_t));
		memcpy(&old_ctx.el3state_ctx, &ctx->el3state_ctx, sizeof(el3_state_t));
#if DYNAMIC_WORKAROUND_CVE_2018_3639
		memcpy(&old_ctx.cve_2018_3639_ctx, &ctx->cve_2018_3639_ctx, sizeof(cve_2018_3639_t));
#endif
#if ERRATA_SPECULATIVE_AT
		memcpy(&old_ctx.errata_speculative_at_ctx, &ctx->errata_speculative_at_ctx, sizeof(errata_speculative_at_t));
#endif
#if CTX_INCLUDE_PAUTH_REGS
		memcpy(&old_ctx.pauth_ctx, &ctx->pauth_ctx, sizeof(pauth_t));
#endif
#endif
		max_off_lvl = psci_find_max_off_lvl(state_info);
		psci_suspend_to_pwrdown_start(idx, end_pwrlvl, max_off_lvl, ep, state_info);
	}
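
	/*
	 * Note (informational): the copies above stash the live context in
	 * old_ctx because psci_suspend_to_pwrdown_start() overwrites it with
	 * the warm-boot entry context. If the powerdown is abandoned and this
	 * core wakes with its context intact, the stash is copied back after
	 * psci_cpu_suspend_to_powerdown_finish() below so the suspend call
	 * can return normally.
	 */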

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_suspend(state_info);
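
	/*
	 * Illustrative platform hook for the call above. "plat_foo" and its
	 * helpers are hypothetical; a real implementation programs whatever
	 * the SoC needs so that the core wakes at the PSCI warm entry point:
	 *
	 *	static void plat_foo_pwr_domain_suspend(
	 *			const psci_power_state_t *target_state)
	 *	{
	 *		mmio_write_64(PLAT_FOO_MAILBOX_BASE, foo_sec_entrypoint);
	 *		if (is_local_state_off(
	 *				target_state->pwr_domain_state[MPIDR_AFFLVL1]))
	 *			plat_foo_pwrc_cluster_off();
	 *	}
	 */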

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_start(state_info);
#endif

	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);

#if ENABLE_RUNTIME_INSTRUMENTATION
	/*
	 * Update the timestamp with cache off. We assume this
	 * timestamp can only be read from the current CPU and the
	 * timestamp cache line will be flushed before return to
	 * normal world on wakeup.
	 */
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_ENTER_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	if (is_power_down_state != 0U) {
		if (psci_plat_pm_ops->pwr_domain_pwr_down != NULL) {
			/* This function may not return */
			psci_plat_pm_ops->pwr_domain_pwr_down(state_info);
		}

		psci_pwrdown_cpu_end_wakeup(max_off_lvl);
	} else {
		/*
		 * We will reach here if only retention/standby states have been
		 * requested at multiple power levels. This means that the cpu
		 * context will be preserved.
		 */
		wfi();
	}

#if ENABLE_RUNTIME_INSTRUMENTATION
	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
	    RT_INSTR_EXIT_HW_LOW_PWR,
	    PMF_NO_CACHE_MAINT);
#endif

	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	/*
	 * Find out which retention states this CPU has exited from until the
	 * 'end_pwrlvl'. The exit retention state could be deeper than the entry
	 * state as a result of state coordination amongst other CPUs post wfi.
	 */
	psci_get_target_local_pwr_states(idx, end_pwrlvl, state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(state_info);
	psci_stats_update_pwr_up(idx, end_pwrlvl, state_info);
#endif

	/*
	 * Waking up means we've retained all context. Call the finishers to put
	 * the system back to a usable state.
	 */
	if (is_power_down_state != 0U) {
#if FEAT_PABANDON
		psci_cpu_suspend_to_powerdown_finish(idx, max_off_lvl, state_info);

		/* we overwrote context ourselves, put it back */
		memcpy(&ctx->gpregs_ctx, &old_ctx.gpregs_ctx, sizeof(gp_regs_t));
		memcpy(&ctx->el3state_ctx, &old_ctx.el3state_ctx, sizeof(el3_state_t));
#if DYNAMIC_WORKAROUND_CVE_2018_3639
		memcpy(&ctx->cve_2018_3639_ctx, &old_ctx.cve_2018_3639_ctx, sizeof(cve_2018_3639_t));
#endif
#if ERRATA_SPECULATIVE_AT
		memcpy(&ctx->errata_speculative_at_ctx, &old_ctx.errata_speculative_at_ctx, sizeof(errata_speculative_at_t));
#endif
#if CTX_INCLUDE_PAUTH_REGS
		memcpy(&ctx->pauth_ctx, &old_ctx.pauth_ctx, sizeof(pauth_t));
#endif
#if !CTX_INCLUDE_EL2_REGS
		cm_el1_sysregs_context_restore(NON_SECURE);
#endif
#endif
	} else {
		psci_cpu_suspend_to_standby_finish(end_pwrlvl, state_info);
	}

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(idx, end_pwrlvl);

exit:
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);

	return rc;
}

/*******************************************************************************
 * The following functions finish an earlier suspend request. They
 * are called by the common finisher routine in psci_common.c. The `state_info`
 * is the psci_power_state from which this CPU has woken up.
 ******************************************************************************/
void psci_cpu_suspend_to_powerdown_finish(unsigned int cpu_idx,
					  unsigned int max_off_lvl,
					  const psci_power_state_t *state_info)
{
	unsigned int counter_freq;

	/* Ensure we have been woken up from a suspended state */
	assert((psci_get_aff_info_state() == AFF_STATE_ON) &&
		(is_local_state_off(
			state_info->pwr_domain_state[PSCI_CPU_PWR_LVL]) != 0));

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	psci_plat_pm_ops->pwr_domain_suspend_finish(state_info);

#if !(HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	/* Arch. management: Enable the data cache, stack memory maintenance. */
	psci_do_pwrup_cache_maintenance();
#endif

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq2();
	write_cntfrq_el0(counter_freq);
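
	/*
	 * Note (informational): CNTFRQ_EL0 is not preserved across a core
	 * powerdown, so it is reprogrammed above from the platform's system
	 * counter frequency before the normal world resumes.
	 */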

#if ENABLE_PAUTH
	/* Store APIAKey_EL1 key */
	set_cpu_data(apiakey[0], read_apiakeylo_el1());
	set_cpu_data(apiakey[1], read_apiakeyhi_el1());
#endif /* ENABLE_PAUTH */

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any book-keeping. If the handler encounters an
	 * error, it's expected to assert within.
	 */
	if ((psci_spd_pm != NULL) && (psci_spd_pm->svc_suspend_finish != NULL)) {
		psci_spd_pm->svc_suspend_finish(max_off_lvl);
	}

	/* This loses its meaning when not suspending, reset so it's correct for OFF */
	psci_set_suspend_pwrlvl(PLAT_MAX_PWR_LVL);

	PUBLISH_EVENT_ARG(psci_suspend_pwrdown_finish, &cpu_idx);
}
388