/*
 * Copyright (c) 2013-2017, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <arch_helpers.h>
#include <assert.h>
#include <bl_common.h>
#include <context.h>
#include <context_mgmt.h>
#include <debug.h>
#include <platform.h>
#include <string.h>
#include "psci_private.h"

/*
 * SPD power management operations, expected to be supplied by the registered
 * SPD on successful SP initialization
 */
const spd_pm_ops_t *psci_spd_pm;

/*
 * PSCI requested local power state map. This array is used to store the local
 * power states requested by a CPU for power levels from level 1 to
 * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
 * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
 * CPU are the same.
 *
 * During state coordination, the platform is passed an array containing the
 * local states requested for a particular non cpu power domain by each cpu
 * within the domain.
 *
 * TODO: Dense packing of the requested states will cause cache thrashing
 * when multiple power domains write to it. If we allocate the requested
 * states at each power level in a cache-line aligned per-domain memory,
 * the cache thrashing can be avoided.
 */
static plat_local_state_t
	psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
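
/*
 * Indexing example (illustrative comment only, not compiled): the request
 * made by CPU 'cpu_idx' for the power domain at level 'lvl' (lvl >= 1) lives
 * at psci_req_local_pwr_states[lvl - 1][cpu_idx]. For instance, a request by
 * CPU 3 for its level 1 (cluster) domain to enter PLAT_MAX_OFF_STATE is
 * recorded as:
 *
 *	psci_req_local_pwr_states[0][3] = PLAT_MAX_OFF_STATE;
 *
 * Level 0 has no row because the requested and target states of a CPU power
 * domain are always identical.
 */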

/*******************************************************************************
 * Arrays that hold the platform's power domain tree information for state
 * management of power domains.
 * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
 * which is an ancestor of a CPU power domain.
 * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain.
 ******************************************************************************/
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
#if USE_COHERENT_MEM
__section("tzfw_coherent_mem")
#endif
;

DEFINE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
const plat_psci_ops_t *psci_plat_pm_ops;

/******************************************************************************
 * Check that the maximum power level supported by the platform makes sense
 *****************************************************************************/
CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
		PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
		assert_platform_max_pwrlvl_check);

/*
 * The plat_local_state used by the platform is one of these types: RUN,
 * RETENTION and OFF. The platform can define further sub-states for each type
 * apart from RUN. This categorization is done to verify the sanity of the
 * psci_power_state passed by the platform and to print debug information. The
 * categorization is done on the basis of the following conditions:
 *
 * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
 *
 * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_RETN.
 *
 * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_OFF.
 */
typedef enum plat_local_state_type {
	STATE_TYPE_RUN = 0,
	STATE_TYPE_RETN,
	STATE_TYPE_OFF
} plat_local_state_type_t;

/* The macro used to categorize plat_local_state. */
#define find_local_state_type(plat_local_state)					\
		((plat_local_state) ? (((plat_local_state) > PLAT_MAX_RET_STATE)\
		? STATE_TYPE_OFF : STATE_TYPE_RETN)				\
		: STATE_TYPE_RUN)

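/*
 * Illustrative example (comment only): assuming a platform that defines
 * PLAT_MAX_RET_STATE as 1 and PLAT_MAX_OFF_STATE as 2, the macro yields:
 *
 *	find_local_state_type(0) == STATE_TYPE_RUN
 *	find_local_state_type(1) == STATE_TYPE_RETN
 *	find_local_state_type(2) == STATE_TYPE_OFF
 */
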
/******************************************************************************
 * Check that the maximum retention level supported by the platform is less
 * than the maximum off level.
 *****************************************************************************/
CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
		assert_platform_max_off_and_retn_state_check);

/******************************************************************************
 * This function ensures that the power state parameter in a CPU_SUSPEND request
 * is valid. If so, it returns the requested states for each power level.
 *****************************************************************************/
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info)
{
	/* Check that the SBZ bits in the power state are zero */
	if (psci_check_power_state(power_state))
		return PSCI_E_INVALID_PARAMS;

	assert(psci_plat_pm_ops->validate_power_state);

	/* Validate the power_state using the platform pm_ops */
	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}

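/*
 * Illustrative sketch (comment only) of a possible platform
 * validate_power_state hook, assuming the original PSCI power_state format
 * with a single valid state per type. The 'plat_' prefixed function below is
 * hypothetical platform code, not part of this file:
 *
 *	static int plat_validate_power_state(unsigned int power_state,
 *					     psci_power_state_t *req_state)
 *	{
 *		unsigned int type = psci_get_pstate_type(power_state);
 *		unsigned int pwr_lvl = psci_get_pstate_pwrlvl(power_state);
 *
 *		if (pwr_lvl > PLAT_MAX_PWR_LVL)
 *			return PSCI_E_INVALID_PARAMS;
 *
 *		if (type == PSTATE_TYPE_STANDBY)
 *			req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] =
 *							PLAT_MAX_RET_STATE;
 *		else
 *			req_state->pwr_domain_state[PSCI_CPU_PWR_LVL] =
 *							PLAT_MAX_OFF_STATE;
 *
 *		return PSCI_E_SUCCESS;
 *	}
 */
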
/******************************************************************************
 * This function retrieves the `psci_power_state_t` for system suspend from
 * the platform.
 *****************************************************************************/
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
{
	/*
	 * Assert that the required pm_ops hook is implemented to ensure that
	 * the capability detected during psci_setup() is valid.
	 */
	assert(psci_plat_pm_ops->get_sys_suspend_power_state);

	/*
	 * Query the platform for the power_state required for system suspend
	 */
	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
}

/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and that the current CPU is the last running CPU in the system.
 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
 * otherwise.
 ******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
	unsigned int cpu_idx, my_idx = plat_my_core_pos();

	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
		if (cpu_idx == my_idx) {
			assert(psci_get_aff_info_state() == AFF_STATE_ON);
			continue;
		}

		if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
			return 0;
	}

	return 1;
}

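/*
 * Usage note (illustrative): the PSCI SYSTEM_SUSPEND path uses this helper
 * to reject a system suspend request while any other core is still ON,
 * along the lines of:
 *
 *	if (!psci_is_last_on_cpu())
 *		return PSCI_E_DENIED;
 */
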
/*******************************************************************************
 * Routine to return the maximum power level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
static unsigned int get_power_on_target_pwrlvl(void)
{
	unsigned int pwrlvl;

	/*
	 * Assume that this cpu was suspended and retrieve its target power
	 * level. If it is invalid then it could only have been turned off
	 * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
	 * cpu can be turned off to.
	 */
	pwrlvl = psci_get_suspend_pwrlvl();
	if (pwrlvl == PSCI_INVALID_PWR_LVL)
		pwrlvl = PLAT_MAX_PWR_LVL;
	return pwrlvl;
}

/******************************************************************************
 * Helper function to update the requested local power state array. This array
 * does not store the requested state for the CPU power level. Hence an
 * assertion is added to prevent us from accessing the wrong index.
 *****************************************************************************/
static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
					 unsigned int cpu_idx,
					 plat_local_state_t req_pwr_state)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);
	psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state;
}

/******************************************************************************
 * This function initializes the psci_req_local_pwr_states.
 *****************************************************************************/
void psci_init_req_local_pwr_states(void)
{
	/* Initialize the requested state of all non CPU power domains as OFF */
	memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE,
			sizeof(psci_req_local_pwr_states));
}

/******************************************************************************
 * Helper function to return a reference to an array containing the local power
 * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
 * array will be the number of cpu power domains of which this power domain is
 * an ancestor. These requested states will be used to determine a suitable
 * target state for this power domain during psci state coordination. An
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
							 unsigned int cpu_idx)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);

	return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
}

/******************************************************************************
 * Helper function to return the current local power state of each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
 * function will be called after a cpu is powered on to find the local state
 * each power domain has emerged from.
 *****************************************************************************/
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	plat_local_state_t *pd_state = target_state->pwr_domain_state;

	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local power state from node to state_info */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
#if !USE_COHERENT_MEM
		/*
		 * If using normal memory for psci_non_cpu_pd_nodes, we need
		 * to flush before reading the local power state as another
		 * cpu in the same power domain could have updated it and this
		 * code runs before caches are enabled.
		 */
		flush_dcache_range(
				(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
		pd_state[lvl] = psci_non_cpu_pd_nodes[parent_idx].local_state;
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the higher levels to RUN */
	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}

/******************************************************************************
 * Helper function to set the target local power state that each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
 * enter. This function will be called after coordination of requested power
 * states has been done for each power level.
 *****************************************************************************/
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
					const psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	const plat_local_state_t *pd_state = target_state->pwr_domain_state;

	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);

	/*
	 * Need to flush as local_state will be accessed with Data Cache
	 * disabled during power on
	 */
	flush_cpu_data(psci_svc_cpu_data.local_state);

	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local_state from state_info */
	for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
		psci_non_cpu_pd_nodes[parent_idx].local_state = pd_state[lvl];
#if !USE_COHERENT_MEM
		flush_dcache_range(
				(uintptr_t)&psci_non_cpu_pd_nodes[parent_idx],
				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}


/*******************************************************************************
 * PSCI helper function to get the parent nodes corresponding to a cpu_index.
 ******************************************************************************/
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
				      unsigned int end_lvl,
				      unsigned int node_index[])
{
	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
	int i;

	for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
		*node_index++ = parent_node;
		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
	}
}

/******************************************************************************
 * This function is invoked post CPU power up and initialization. It sets the
 * affinity info state, target power state and requested power state for the
 * current CPU and all its ancestor power domains to RUN.
 *****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
{
	unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* Reset the local_state to RUN for the non cpu power domains. */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
		psci_non_cpu_pd_nodes[parent_idx].local_state =
				PSCI_LOCAL_STATE_RUN;
#if !USE_COHERENT_MEM
		flush_dcache_range(
				(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
		psci_set_req_local_pwr_state(lvl,
					     cpu_idx,
					     PSCI_LOCAL_STATE_RUN);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the affinity info state to ON */
	psci_set_aff_info_state(AFF_STATE_ON);

	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
	flush_cpu_data(psci_svc_cpu_data);
}

/******************************************************************************
 * This function is passed the local power states requested for each power
 * domain (state_info) between the current CPU domain and its ancestors until
 * the target power level (end_pwrlvl). It updates the array of requested power
 * states with this information.
 *
 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
 * retrieves the states requested by all the cpus of which the power domain at
 * that level is an ancestor. It passes this information to the platform to
 * coordinate and return the target power state. If the target state for a level
 * is RUN then subsequent levels are not considered. At the CPU level, state
 * coordination is not required. Hence, the requested and the target states are
 * the same.
 *
 * The 'state_info' is updated with the target state for each level between the
 * CPU and the 'end_pwrlvl' and returned to the caller.
 *
 * This function will only be invoked with data cache enabled and while
 * powering down a core.
 *****************************************************************************/
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info)
{
	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
	unsigned int start_idx, ncpus;
	plat_local_state_t target_state, *req_states;

	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/*
	 * For level 0, the requested state will be equivalent to the
	 * target state.
	 */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {

		/* First update the requested power state */
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);

		/* Get the requested power states for this power level */
		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
		req_states = psci_get_req_local_pwr_states(lvl, start_idx);

		/*
		 * Let the platform coordinate amongst the requested states at
		 * this power level and return the target local power state.
		 */
		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
		target_state = plat_get_target_pwr_state(lvl,
							 req_states,
							 ncpus);

		state_info->pwr_domain_state[lvl] = target_state;

		/* Break early if the negotiated target power state is RUN */
		if (is_local_state_run(state_info->pwr_domain_state[lvl]))
			break;

		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/*
	 * This is for cases when we break out of the above loop early because
	 * the target power state is RUN at a power level < end_pwrlvl.
	 * We update the requested power state from state_info and then
	 * set the target state as RUN.
	 */
	for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);
		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
	}

	/* Update the target state in the power domain nodes */
	psci_set_target_local_pwr_states(end_pwrlvl, state_info);
}

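/*
 * Worked example (illustrative): consider a two-level topology and assume the
 * platform's plat_get_target_pwr_state() picks the shallowest (numerically
 * smallest) of the requested states, as the default implementation does.
 * CPU0 requests OFF for its level 1 (cluster) domain while CPU1, in the same
 * cluster, is still running. The level 1 requested states are then
 * { OFF, RUN }, the coordinated target is RUN, the loop above breaks at
 * level 1, and state_info ends up as { level 0: OFF, level 1: RUN }: CPU0
 * may power itself down but the cluster stays on for CPU1.
 */
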
/******************************************************************************
 * This function validates a suspend request by making sure that if a standby
 * state is requested then no power level is turned off and the highest power
 * level is placed in a standby/retention state.
 *
 * It also ensures that the state that power level X will enter is not
 * shallower than the state that power level X + 1 will enter.
 *
 * This validation will be enabled only for DEBUG builds as the platform is
 * expected to perform these validations as well.
 *****************************************************************************/
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state)
{
	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
	plat_local_state_t state;
	plat_local_state_type_t req_state_type, deepest_state_type;
	int i;

	/* Find the target suspend power level */
	target_lvl = psci_find_target_suspend_lvl(state_info);
	if (target_lvl == PSCI_INVALID_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* All power domain levels are in a RUN state to begin with */
	deepest_state_type = STATE_TYPE_RUN;

	for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
		state = state_info->pwr_domain_state[i];
		req_state_type = find_local_state_type(state);

		/*
		 * While traversing from the highest power level to the lowest,
		 * the state requested for lower levels has to be the same or
		 * deeper i.e. equal to or greater than the state at the higher
		 * levels. If this condition is true, then the requested state
		 * becomes the deepest state encountered so far.
		 */
		if (req_state_type < deepest_state_type)
			return PSCI_E_INVALID_PARAMS;
		deepest_state_type = req_state_type;
	}

	/* Find the highest off power level */
	max_off_lvl = psci_find_max_off_lvl(state_info);

	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
	max_retn_lvl = PSCI_INVALID_PWR_LVL;
	if (target_lvl != max_off_lvl)
		max_retn_lvl = target_lvl;

	/*
	 * If this is not a request for a power down state then the max off
	 * level has to be invalid and the max retention level has to be a
	 * valid power level.
	 */
	if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
				    max_retn_lvl == PSCI_INVALID_PWR_LVL))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

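/*
 * Illustrative examples (comment only) for a two-level platform, with states
 * written as { level 0, level 1 }:
 *
 *  - { OFF, RETN } is accepted: the CPU enters a deeper state than its
 *    cluster.
 *  - { RETN, OFF } is rejected: a level may not enter a shallower state
 *    than its ancestor.
 *  - For a standby request (is_power_down_state == 0), any OFF state at any
 *    level is rejected.
 */
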
/******************************************************************************
 * This function finds the highest power level which will be powered down
 * amongst all the power levels specified in the 'state_info' structure
 *****************************************************************************/
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_off(state_info->pwr_domain_state[i]))
			return i;
	}

	return PSCI_INVALID_PWR_LVL;
}

/******************************************************************************
 * This function finds the level of the highest power domain which will be
 * placed in a low power state during a suspend operation.
 *****************************************************************************/
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
		if (!is_local_state_run(state_info->pwr_domain_state[i]))
			return i;
	}

	return PSCI_INVALID_PWR_LVL;
}

/*******************************************************************************
 * This function is passed a cpu_index and the highest level in the topology
 * tree that the operation should be applied to. It picks up locks in order of
 * increasing power domain level in the range specified.
 ******************************************************************************/
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
				   unsigned int cpu_idx)
{
	unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
	unsigned int level;

	/* No locking required for level 0. Hence start locking from level 1 */
	for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}

/*******************************************************************************
 * This function is passed a cpu_index and the highest level in the topology
 * tree that the operation should be applied to. It releases the locks in order
 * of decreasing power domain level in the range specified.
 ******************************************************************************/
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
				   unsigned int cpu_idx)
{
	unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
	int level;

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

	/* Unlock top down. No unlocking required for level 0. */
	for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
		parent_idx = parent_nodes[level - 1];
		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
	}
}

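/*
 * Typical usage (illustrative): callers bracket topology and state updates
 * with these helpers, e.g.
 *
 *	psci_acquire_pwr_domain_locks(end_pwrlvl, cpu_idx);
 *	... read or modify power domain state ...
 *	psci_release_pwr_domain_locks(end_pwrlvl, cpu_idx);
 *
 * Acquiring bottom-up and releasing top-down keeps the lock ordering
 * consistent across all CPUs and therefore deadlock-free.
 */
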
/*******************************************************************************
 * Simple routine to determine whether an mpidr is valid or not.
 ******************************************************************************/
int psci_validate_mpidr(u_register_t mpidr)
{
	if (plat_core_pos_by_mpidr(mpidr) < 0)
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * This function determines the full entrypoint information for the requested
 * PSCI entrypoint on power on/resume and returns it.
 ******************************************************************************/
#ifdef AARCH32
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr;
	unsigned int aif, ee, mode;
	u_register_t scr = read_scr();
	u_register_t ns_sctlr, sctlr;

	/* Switch to non secure state */
	write_scr(scr | SCR_NS_BIT);
	isb();
	ns_sctlr = read_sctlr();

	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;

	/* Return to original state */
	write_scr(scr);
	isb();
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	memset(&ep->args, 0, sizeof(ep->args));
	ep->args.arg0 = context_id;

	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: If HYP mode is not implemented, choose the asynchronous
	 * exception bits according to the values of the SCR.{AW, FW} bits.
	 */
	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

	return PSCI_E_SUCCESS;
}

#else
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr, sctlr;
	unsigned int daif, ee, mode;
	u_register_t ns_scr_el3 = read_scr_el3();
	u_register_t ns_sctlr_el1 = read_sctlr_el1();

	sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	memset(&ep->args, 0, sizeof(ep->args));
	ep->args.arg0 = context_id;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	if (ns_scr_el3 & SCR_RW_BIT) {

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if (entrypoint & 0x1)
			return PSCI_E_INVALID_ADDRESS;

		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;

		ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	} else {

		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

		/*
		 * TODO: If HYP mode is not implemented, choose the
		 * asynchronous exception bits according to the values of the
		 * SCR.{AW, FW} bits.
		 */
		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

		ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
	}

	return PSCI_E_SUCCESS;
}
#endif

/*******************************************************************************
 * This function validates the entrypoint with the platform layer if the
 * appropriate pm_ops hook is exported by the platform and returns the
 * 'entry_point_info'.
 ******************************************************************************/
int psci_validate_entry_point(entry_point_info_t *ep,
			      uintptr_t entrypoint,
			      u_register_t context_id)
{
	int rc;

	/* Validate the entrypoint using platform psci_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS)
			return PSCI_E_INVALID_ADDRESS;
	}

	/*
	 * Verify and derive the re-entry information for the non-secure
	 * world from the non-secure state in which this call originated.
	 */
	rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
	return rc;
}

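/*
 * Illustrative sketch (hypothetical platform code, comment only) of the
 * optional validate_ns_entrypoint hook consulted above. The region symbols
 * SEC_DRAM_BASE and SEC_DRAM_SIZE are assumptions made for this example:
 *
 *	static int plat_validate_ns_entrypoint(uintptr_t entrypoint)
 *	{
 *		// Reject entry points that fall inside secure DRAM
 *		if ((entrypoint >= SEC_DRAM_BASE) &&
 *		    (entrypoint < (SEC_DRAM_BASE + SEC_DRAM_SIZE)))
 *			return PSCI_E_INVALID_ADDRESS;
 *
 *		return PSCI_E_SUCCESS;
 *	}
 */
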
/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses the node information and finds the highest power level powered
 * off and performs generic, architectural, platform setup and state management
 * to power on that power level and power levels below it.
 * e.g. For a cpu that's been powered on, it will call the platform specific
 * code to enable the gic cpu interface and for a cluster it will enable
 * coherency at the interconnect level in addition to gic cpu interface.
 ******************************************************************************/
void psci_warmboot_entrypoint(void)
{
	unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

	/*
	 * Verify that we have been explicitly turned ON or resumed from
	 * suspend.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
		ERROR("Unexpected affinity info state");
		panic();
	}

	/*
	 * Get the maximum power domain level to traverse to after this cpu
	 * has been physically powered up.
	 */
	end_pwrlvl = get_power_on_target_pwrlvl();

	/*
	 * This function acquires the lock corresponding to each power level so
	 * that by the time all locks are taken, a snapshot of the system
	 * topology has been captured and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(&state_info);
#endif

	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

	/*
	 * This CPU could be resuming from suspend or it could have just been
	 * turned on. To distinguish between these 2 cases, we examine the
	 * affinity state of the CPU:
	 *  - If the affinity state is ON_PENDING then it has just been
	 *    turned on.
	 *  - Else it is resuming from suspend.
	 *
	 * Depending on the type of warm reset identified, choose the right set
	 * of power management handlers and perform the generic, architecture
	 * and platform specific handling.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
		psci_cpu_on_finish(cpu_idx, &state_info);
	else
		psci_cpu_suspend_finish(cpu_idx, &state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domains which are ancestors of this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

#if ENABLE_PSCI_STAT
	/*
	 * Update PSCI stats.
	 * Caches are off when writing stats data on the power down path.
	 * Since caches are now enabled, it's necessary to do cache
	 * maintenance before reading that same data.
	 */
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

	/*
	 * This loop releases the lock corresponding to each power level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);
}

/*******************************************************************************
 * This function initializes the set of hooks that PSCI invokes as part of
 * power management operations. The power management hooks are expected to be
 * provided by the SPD, after it finishes all its initialization.
 ******************************************************************************/
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
{
	assert(pm);
	psci_spd_pm = pm;

	if (pm->svc_migrate)
		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);

	if (pm->svc_migrate_info)
		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
				| define_psci_cap(PSCI_MIG_INFO_TYPE);
}

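/*
 * Illustrative usage (sketch, comment only): an SPD such as the TSPD
 * registers its handlers once the Secure Payload has finished initializing,
 * along these lines (handler list abbreviated):
 *
 *	static const spd_pm_ops_t tspd_pm = {
 *		.svc_on = tspd_cpu_on_handler,
 *		.svc_off = tspd_cpu_off_handler,
 *		.svc_suspend = tspd_cpu_suspend_handler,
 *		.svc_on_finish = tspd_cpu_on_finish_handler,
 *		...
 *	};
 *
 *	psci_register_spd_pm_hook(&tspd_pm);
 */
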
/*******************************************************************************
 * This function invokes the migrate info hook in the spd_pm_ops. It performs
 * the necessary return value validation. If the Secure Payload is UP and
 * migrate capable, it returns the mpidr of the CPU on which the Secure payload
 * is resident through the mpidr parameter. Else the value of the parameter on
 * return is undefined.
 ******************************************************************************/
int psci_spd_migrate_info(u_register_t *mpidr)
{
	int rc;

	if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
		return PSCI_E_NOT_SUPPORTED;

	rc = psci_spd_pm->svc_migrate_info(mpidr);

	assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP ||
	       rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);

	return rc;
}


/*******************************************************************************
 * This function prints the state of all power domains present in the
 * system
 ******************************************************************************/
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
	unsigned int idx;
	plat_local_state_t state;
	plat_local_state_type_t state_type;

	/* This array maps to the PSCI_STATE_X definitions in psci.h */
	static const char * const psci_state_type_str[] = {
		"ON",
		"RETENTION",
		"OFF",
	};

	INFO("PSCI Power Domain Map:\n");
	for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT);
							idx++) {
		state_type = find_local_state_type(
				psci_non_cpu_pd_nodes[idx].local_state);
		INFO("  Domain Node : Level %u, parent_node %d,"
				" State %s (0x%x)\n",
				psci_non_cpu_pd_nodes[idx].level,
				psci_non_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_non_cpu_pd_nodes[idx].local_state);
	}

	for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
		state = psci_get_cpu_local_state_by_idx(idx);
		state_type = find_local_state_type(state);
		INFO("  CPU Node : MPID 0x%llx, parent_node %d,"
				" State %s (0x%x)\n",
				(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
				psci_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_get_cpu_local_state_by_idx(idx));
	}
#endif
}

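/*
 * Sample output (illustrative, reconstructed from the format strings above
 * for a hypothetical single-cluster, two-core platform with every domain
 * running):
 *
 *	INFO:    PSCI Power Domain Map:
 *	INFO:      Domain Node : Level 1, parent_node 0, State ON (0x0)
 *	INFO:      CPU Node : MPID 0x0, parent_node 0, State ON (0x0)
 *	INFO:      CPU Node : MPID 0x1, parent_node 0, State ON (0x0)
 */
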
#if ENABLE_PLAT_COMPAT
/*******************************************************************************
 * PSCI Compatibility helper function to return the 'power_state' parameter of
 * the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA
 * if not invoked within CPU_SUSPEND for the current CPU.
 ******************************************************************************/
int psci_get_suspend_powerstate(void)
{
	/* Sanity check to verify that the CPU is within CPU_SUSPEND */
	if (psci_get_aff_info_state() == AFF_STATE_ON &&
		!is_local_state_run(psci_get_cpu_local_state()))
		return psci_power_state_compat[plat_my_core_pos()];

	return PSCI_INVALID_DATA;
}

/*******************************************************************************
 * PSCI Compatibility helper function to return the state id of the current
 * cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA
 * if not invoked within CPU_SUSPEND for the current CPU.
 ******************************************************************************/
int psci_get_suspend_stateid(void)
{
	unsigned int power_state;
	power_state = psci_get_suspend_powerstate();
	if (power_state != PSCI_INVALID_DATA)
		return psci_get_pstate_id(power_state);

	return PSCI_INVALID_DATA;
}

/*******************************************************************************
 * PSCI Compatibility helper function to return the state id encoded in the
 * 'power_state' parameter of the CPU specified by 'mpidr'. Returns
 * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND.
 ******************************************************************************/
int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
{
	int cpu_idx = plat_core_pos_by_mpidr(mpidr);

	if (cpu_idx == -1)
		return PSCI_INVALID_DATA;

	/* Sanity check to verify that the CPU is in CPU_SUSPEND */
	if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON &&
		!is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx)))
		return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);

	return PSCI_INVALID_DATA;
}

/*******************************************************************************
 * This function returns the highest affinity level which is in the OFF
 * state. The affinity instance with which the level is associated is
 * determined by the caller.
 ******************************************************************************/
unsigned int psci_get_max_phys_off_afflvl(void)
{
	psci_power_state_t state_info;

	memset(&state_info, 0, sizeof(state_info));
	psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info);

	return psci_find_target_suspend_lvl(&state_info);
}

/*******************************************************************************
 * PSCI Compatibility helper function to return the target affinity level
 * requested for the CPU_SUSPEND. This function assumes that affinity levels
 * correspond to power domain levels on the platform.
 ******************************************************************************/
int psci_get_suspend_afflvl(void)
{
	return psci_get_suspend_pwrlvl();
}

#endif