xref: /rk3399_ARM-atf/lib/psci/psci_main.c (revision 28d3d614b57730bdf364e49259d3c42599d26145)
/*
 * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <debug.h>
#include <platform.h>
#include <smcc.h>
#include <string.h>
#include "psci_private.h"

/*******************************************************************************
 * PSCI front-end API for servicing SMCs, as described in the PSCI spec.
 ******************************************************************************/
int psci_cpu_on(u_register_t target_cpu,
		uintptr_t entrypoint,
		u_register_t context_id)
{
	int rc;
	entry_point_info_t ep;

	/* Determine if the cpu exists or not */
	rc = psci_validate_mpidr(target_cpu);
	if (rc != PSCI_E_SUCCESS)
		return PSCI_E_INVALID_PARAMS;

	/* Validate the entry point and get the entry_point_info */
	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/*
	 * To turn this cpu on, specify which power
	 * levels need to be turned on
	 */
	return psci_cpu_on_start(target_cpu, &ep);
}

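/*
 * Return the version of PSCI implemented: major version in bits[31:16] and
 * minor version in bits[15:0], as defined by the PSCI specification.
 */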
unsigned int psci_version(void)
{
	return PSCI_MAJOR_VER | PSCI_MINOR_VER;
}

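/*
 * CPU_SUSPEND: place the calling core, and possibly higher level power
 * domains, in the retention or power down state described by power_state.
 */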
int psci_cpu_suspend(unsigned int power_state,
		     uintptr_t entrypoint,
		     u_register_t context_id)
{
	int rc;
	unsigned int target_pwrlvl, is_power_down_state;
	entry_point_info_t ep;
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
	plat_local_state_t cpu_pd_state;

	/* Validate the power_state parameter */
	rc = psci_validate_power_state(power_state, &state_info);
	if (rc != PSCI_E_SUCCESS) {
		assert(rc == PSCI_E_INVALID_PARAMS);
		return rc;
	}

	/*
	 * Get the value of the state type bit from the power state parameter.
	 */
	is_power_down_state = psci_get_pstate_type(power_state);

	/* Sanity check the requested suspend levels */
	assert(psci_validate_suspend_req(&state_info, is_power_down_state)
			== PSCI_E_SUCCESS);

	target_pwrlvl = psci_find_target_suspend_lvl(&state_info);
	if (target_pwrlvl == PSCI_INVALID_PWR_LVL) {
		ERROR("Invalid target power level for suspend operation\n");
		panic();
	}

	/* Fast path for CPU standby. */
	if (is_cpu_standby_req(is_power_down_state, target_pwrlvl)) {
		if (!psci_plat_pm_ops->cpu_standby)
			return PSCI_E_INVALID_PARAMS;

		/*
		 * Set the state of the CPU power domain to the platform
		 * specific retention state and enter the standby state.
		 */
		cpu_pd_state = state_info.pwr_domain_state[PSCI_CPU_PWR_LVL];
		psci_set_cpu_local_state(cpu_pd_state);

#if ENABLE_PSCI_STAT
		/*
		 * Capture time-stamp before CPU standby.
		 * No cache maintenance is needed as caches
		 * are ON throughout the CPU standby operation.
		 */
		PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
			PMF_NO_CACHE_MAINT);
#endif

		psci_plat_pm_ops->cpu_standby(cpu_pd_state);

		/* Upon exit from standby, set the state back to RUN. */
		psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);

#if ENABLE_PSCI_STAT
		/* Capture time-stamp after CPU standby */
		PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
			PMF_NO_CACHE_MAINT);

		/* Update PSCI stats */
		psci_stats_update_pwr_up(PSCI_CPU_PWR_LVL, &state_info,
			PMF_NO_CACHE_MAINT);
#endif

		return PSCI_E_SUCCESS;
	}

	/*
	 * If a power down state has been requested, we need to verify entry
	 * point and program entry information.
	 */
	if (is_power_down_state) {
		rc = psci_validate_entry_point(&ep, entrypoint, context_id);
		if (rc != PSCI_E_SUCCESS)
			return rc;
	}

	/*
	 * Do what is needed to enter the power down state. Upon success,
	 * enter the final wfi which will power down this CPU. This function
	 * might return if the power down was abandoned for any reason, e.g.
	 * arrival of an interrupt.
	 */
	psci_cpu_suspend_start(&ep,
			    target_pwrlvl,
			    &state_info,
			    is_power_down_state);

	return PSCI_E_SUCCESS;
}

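/*
 * SYSTEM_SUSPEND: suspend the whole system to its deepest power state. Only
 * permitted when the calling core is the last core still powered on.
 */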
int psci_system_suspend(uintptr_t entrypoint, u_register_t context_id)
{
	int rc;
	psci_power_state_t state_info;
	entry_point_info_t ep;

	/* Check if the current CPU is the last ON CPU in the system */
	if (!psci_is_last_on_cpu())
		return PSCI_E_DENIED;

	/* Validate the entry point and get the entry_point_info */
	rc = psci_validate_entry_point(&ep, entrypoint, context_id);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/* Query the psci_power_state for system suspend */
	psci_query_sys_suspend_pwrstate(&state_info);

	/* Ensure that the psci_power_state makes sense */
	assert(psci_find_target_suspend_lvl(&state_info) == PLAT_MAX_PWR_LVL);
	assert(psci_validate_suspend_req(&state_info, PSTATE_TYPE_POWERDOWN)
						== PSCI_E_SUCCESS);
	assert(is_local_state_off(state_info.pwr_domain_state[PLAT_MAX_PWR_LVL]));

	/*
	 * Do what is needed to enter the system suspend state. This function
	 * might return if the power down was abandoned for any reason, e.g.
	 * arrival of an interrupt.
	 */
	psci_cpu_suspend_start(&ep,
			    PLAT_MAX_PWR_LVL,
			    &state_info,
			    PSTATE_TYPE_POWERDOWN);

	return PSCI_E_SUCCESS;
}

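/*
 * CPU_OFF: power down the calling core, and any higher level power domains
 * that can be taken down along with it.
 */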
int psci_cpu_off(void)
{
	int rc;
	unsigned int target_pwrlvl = PLAT_MAX_PWR_LVL;

	/*
	 * Do what is needed to power off this CPU and possibly higher power
	 * levels if it is able to do so. Upon success, enter the final wfi
	 * which will power down this CPU.
	 */
	rc = psci_do_cpu_off(target_pwrlvl);

	/*
	 * The only error cpu_off can return is E_DENIED. So check if that's
	 * indeed the case.
	 */
	assert(rc == PSCI_E_DENIED);

	return rc;
}

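/*
 * AFFINITY_INFO: report the state (ON, OFF or ON_PENDING) recorded for the
 * target core.
 */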
int psci_affinity_info(u_register_t target_affinity,
		       unsigned int lowest_affinity_level)
{
	unsigned int target_idx;

	/* We don't support levels higher than PSCI_CPU_PWR_LVL */
	if (lowest_affinity_level > PSCI_CPU_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* Calculate the cpu index of the target */
	target_idx = plat_core_pos_by_mpidr(target_affinity);
	if (target_idx == -1)
		return PSCI_E_INVALID_PARAMS;

	return psci_get_aff_info_state_by_idx(target_idx);
}

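/*
 * MIGRATE: ask the Secure Payload Dispatcher to migrate a uniprocessor,
 * migrate-capable Trusted OS to the target core.
 */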
int psci_migrate(u_register_t target_cpu)
{
	int rc;
	u_register_t resident_cpu_mpidr;

	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
	if (rc != PSCI_TOS_UP_MIG_CAP)
		return (rc == PSCI_TOS_NOT_UP_MIG_CAP) ?
			  PSCI_E_DENIED : PSCI_E_NOT_SUPPORTED;

	/*
	 * Migrate should only be invoked on the CPU where
	 * the Secure OS is resident.
	 */
	if (resident_cpu_mpidr != read_mpidr_el1())
		return PSCI_E_NOT_PRESENT;

	/* Check the validity of the specified target cpu */
	rc = psci_validate_mpidr(target_cpu);
	if (rc != PSCI_E_SUCCESS)
		return PSCI_E_INVALID_PARAMS;

	assert(psci_spd_pm && psci_spd_pm->svc_migrate);

	rc = psci_spd_pm->svc_migrate(read_mpidr_el1(), target_cpu);
	assert(rc == PSCI_E_SUCCESS || rc == PSCI_E_INTERN_FAIL);

	return rc;
}

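/*
 * MIGRATE_INFO_TYPE: report the Trusted OS migration capability, as provided
 * by the SPD.
 */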
int psci_migrate_info_type(void)
{
	u_register_t resident_cpu_mpidr;

	return psci_spd_migrate_info(&resident_cpu_mpidr);
}

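/*
 * MIGRATE_INFO_UP_CPU: return the MPIDR of the core on which a uniprocessor
 * Trusted OS is resident.
 */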
long psci_migrate_info_up_cpu(void)
{
	u_register_t resident_cpu_mpidr;
	int rc;

	/*
	 * The return value of this call depends upon what
	 * psci_spd_migrate_info() returns.
	 */
	rc = psci_spd_migrate_info(&resident_cpu_mpidr);
	if (rc != PSCI_TOS_NOT_UP_MIG_CAP && rc != PSCI_TOS_UP_MIG_CAP)
		return PSCI_E_INVALID_PARAMS;

	return resident_cpu_mpidr;
}

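/*
 * NODE_HW_STATE: report the true hardware state (HW_ON, HW_OFF or HW_STANDBY)
 * of the given power domain node, as seen by the platform power controller.
 */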
int psci_node_hw_state(u_register_t target_cpu,
		       unsigned int power_level)
{
	int rc;

	/* Validate target_cpu */
	rc = psci_validate_mpidr(target_cpu);
	if (rc != PSCI_E_SUCCESS)
		return PSCI_E_INVALID_PARAMS;

	/* Validate power_level against PLAT_MAX_PWR_LVL */
	if (power_level > PLAT_MAX_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/*
	 * Dispatch this call to the platform to query the power controller,
	 * and pass its return value back to the caller.
	 */
	assert(psci_plat_pm_ops->get_node_hw_state);
	rc = psci_plat_pm_ops->get_node_hw_state(target_cpu, power_level);
	assert((rc >= HW_ON && rc <= HW_STANDBY) || rc == PSCI_E_NOT_SUPPORTED
			|| rc == PSCI_E_INVALID_PARAMS);
	return rc;
}

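/*
 * PSCI_FEATURES: report whether a given PSCI function is implemented and, for
 * CPU_SUSPEND, describe the power_state format and OS-initiated mode support.
 */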
int psci_features(unsigned int psci_fid)
{
	unsigned int local_caps = psci_caps;

	/* Check if it is a 64-bit function */
	if (((psci_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_64)
		local_caps &= PSCI_CAP_64BIT_MASK;

	/* Check for invalid fid */
	if (!(is_std_svc_call(psci_fid) && is_valid_fast_smc(psci_fid)
			&& is_psci_fid(psci_fid)))
		return PSCI_E_NOT_SUPPORTED;

	/* Check if the psci fid is supported or not */
	if (!(local_caps & define_psci_cap(psci_fid)))
		return PSCI_E_NOT_SUPPORTED;

	/* Format the feature flags */
	if (psci_fid == PSCI_CPU_SUSPEND_AARCH32 ||
			psci_fid == PSCI_CPU_SUSPEND_AARCH64) {
		/*
		 * The trusted firmware does not support OS Initiated Mode.
		 */
		return (FF_PSTATE << FF_PSTATE_SHIFT) |
			((!FF_SUPPORTS_OS_INIT_MODE) << FF_MODE_SUPPORT_SHIFT);
	}

	/* Return 0 for all other supported fids */
	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * PSCI top level handler for servicing SMCs.
 ******************************************************************************/
u_register_t psci_smc_handler(uint32_t smc_fid,
			  u_register_t x1,
			  u_register_t x2,
			  u_register_t x3,
			  u_register_t x4,
			  void *cookie,
			  void *handle,
			  u_register_t flags)
{
	if (is_caller_secure(flags))
		return SMC_UNK;

	/* Check the fid against the capabilities */
	if (!(psci_caps & define_psci_cap(smc_fid)))
		return SMC_UNK;

	if (((smc_fid >> FUNCID_CC_SHIFT) & FUNCID_CC_MASK) == SMC_32) {
		/* 32-bit PSCI function, clear top parameter bits */

		x1 = (uint32_t)x1;
		x2 = (uint32_t)x2;
		x3 = (uint32_t)x3;

		switch (smc_fid) {
		case PSCI_VERSION:
			return psci_version();

		case PSCI_CPU_OFF:
			return psci_cpu_off();

		case PSCI_CPU_SUSPEND_AARCH32:
			return psci_cpu_suspend(x1, x2, x3);

		case PSCI_CPU_ON_AARCH32:
			return psci_cpu_on(x1, x2, x3);

		case PSCI_AFFINITY_INFO_AARCH32:
			return psci_affinity_info(x1, x2);

		case PSCI_MIG_AARCH32:
			return psci_migrate(x1);

		case PSCI_MIG_INFO_TYPE:
			return psci_migrate_info_type();

		case PSCI_MIG_INFO_UP_CPU_AARCH32:
			return psci_migrate_info_up_cpu();

		case PSCI_NODE_HW_STATE_AARCH32:
			return psci_node_hw_state(x1, x2);

		case PSCI_SYSTEM_SUSPEND_AARCH32:
			return psci_system_suspend(x1, x2);

		case PSCI_SYSTEM_OFF:
			psci_system_off();
			/* We should never return from psci_system_off() */

		case PSCI_SYSTEM_RESET:
			psci_system_reset();
			/* We should never return from psci_system_reset() */

		case PSCI_FEATURES:
			return psci_features(x1);

#if ENABLE_PSCI_STAT
		case PSCI_STAT_RESIDENCY_AARCH32:
			return psci_stat_residency(x1, x2);

		case PSCI_STAT_COUNT_AARCH32:
			return psci_stat_count(x1, x2);
#endif

		default:
			break;
		}
	} else {
		/* 64-bit PSCI function */

		switch (smc_fid) {
		case PSCI_CPU_SUSPEND_AARCH64:
			return psci_cpu_suspend(x1, x2, x3);

		case PSCI_CPU_ON_AARCH64:
			return psci_cpu_on(x1, x2, x3);

		case PSCI_AFFINITY_INFO_AARCH64:
			return psci_affinity_info(x1, x2);

		case PSCI_MIG_AARCH64:
			return psci_migrate(x1);

		case PSCI_MIG_INFO_UP_CPU_AARCH64:
			return psci_migrate_info_up_cpu();

		case PSCI_NODE_HW_STATE_AARCH64:
			return psci_node_hw_state(x1, x2);

		case PSCI_SYSTEM_SUSPEND_AARCH64:
			return psci_system_suspend(x1, x2);

#if ENABLE_PSCI_STAT
		case PSCI_STAT_RESIDENCY_AARCH64:
			return psci_stat_residency(x1, x2);

		case PSCI_STAT_COUNT_AARCH64:
			return psci_stat_count(x1, x2);
#endif

		default:
			break;
		}
	}

	WARN("Unimplemented PSCI Call: 0x%x\n", smc_fid);
	return SMC_UNK;
}
475