/* xref: /rk3399_ARM-atf/lib/psci/psci_common.c (revision b88a4416b5e5f2bda2240c632ba79e15a9a75c45) */
/*
 * Copyright (c) 2013-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <string.h>

#include <arch.h>
#include <arch_helpers.h>
#include <common/bl_common.h>
#include <common/debug.h>
#include <context.h>
#include <drivers/delay_timer.h>
#include <lib/el3_runtime/context_mgmt.h>
#include <lib/utils.h>
#include <plat/common/platform.h>

#include "psci_private.h"

/*
 * SPD power management operations, expected to be supplied by the registered
 * SPD on successful SP initialization
 */
const spd_pm_ops_t *psci_spd_pm;

/*
 * PSCI requested local power state map. This array is used to store the local
 * power states requested by a CPU for power levels from level 1 to
 * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
 * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
 * CPU are the same.
 *
 * During state coordination, the platform is passed an array containing the
 * local states requested for a particular non cpu power domain by each cpu
 * within the domain.
 *
 * TODO: Dense packing of the requested states will cause cache thrashing
 * when multiple power domains write to it. If we allocate the requested
 * states at each power level in cache-line-aligned per-domain memory,
 * the cache thrashing can be avoided.
 */
static plat_local_state_t
	psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
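
/*
 * Illustrative note (not part of the upstream sources): the requested state
 * for power level 'lvl' (lvl > 0) of the CPU at index 'cpu' lives at
 * psci_req_local_pwr_states[lvl - 1U][cpu]. For example, on a hypothetical
 * platform with PLAT_MAX_PWR_LVL == 2 (level 1 = cluster, level 2 = system),
 * a CPU_SUSPEND call from core 3 requesting cluster power down would be
 * recorded as:
 *
 *	psci_req_local_pwr_states[0][3] = PLAT_MAX_OFF_STATE;
 *
 * The core's own level 0 state is kept in its per-CPU data instead.
 */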

unsigned int psci_plat_core_count;

/*******************************************************************************
 * Arrays that hold the platform's power domain tree information for state
 * management of power domains.
 * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
 * which is an ancestor of a CPU power domain.
 * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
 ******************************************************************************/
non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
#if USE_COHERENT_MEM
__section(".tzfw_coherent_mem")
#endif
;

/* Lock for PSCI state coordination */
DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);

cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];

/*******************************************************************************
 * Pointer to functions exported by the platform to complete power mgmt. ops
 ******************************************************************************/
const plat_psci_ops_t *psci_plat_pm_ops;

/******************************************************************************
 * Check that the maximum power level supported by the platform makes sense
 *****************************************************************************/
CASSERT((PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL) &&
	(PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL),
	assert_platform_max_pwrlvl_check);

#if PSCI_OS_INIT_MODE
/*******************************************************************************
 * The power state coordination mode used in CPU_SUSPEND.
 * Defaults to platform-coordinated mode.
 ******************************************************************************/
suspend_mode_t psci_suspend_mode = PLAT_COORD;
#endif

/*
 * The plat_local_state used by the platform is one of these types: RUN,
 * RETENTION and OFF. The platform can define further sub-states for each type
 * apart from RUN. This categorization is done to verify the sanity of the
 * psci_power_state passed by the platform and to print debug information. The
 * categorization is done on the basis of the following conditions:
 *
 * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
 *
 * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_RETN.
 *
 * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
 *    STATE_TYPE_OFF.
 */
typedef enum plat_local_state_type {
	STATE_TYPE_RUN = 0,
	STATE_TYPE_RETN,
	STATE_TYPE_OFF
} plat_local_state_type_t;

/* Function used to categorize plat_local_state. */
static plat_local_state_type_t find_local_state_type(plat_local_state_t state)
{
	if (state != 0U) {
		if (state > PLAT_MAX_RET_STATE) {
			return STATE_TYPE_OFF;
		} else {
			return STATE_TYPE_RETN;
		}
	} else {
		return STATE_TYPE_RUN;
	}
}
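
/*
 * Worked example (illustrative; assumes PLAT_MAX_RET_STATE == 1 and
 * PLAT_MAX_OFF_STATE == 2, the values used by several Arm reference
 * platforms):
 *
 *	find_local_state_type(0U) == STATE_TYPE_RUN
 *	find_local_state_type(1U) == STATE_TYPE_RETN
 *	find_local_state_type(2U) == STATE_TYPE_OFF
 */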

/******************************************************************************
 * Check that the maximum retention level supported by the platform is less
 * than the maximum off level.
 *****************************************************************************/
CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE,
		assert_platform_max_off_and_retn_state_check);

/******************************************************************************
 * This function ensures that the power state parameter in a CPU_SUSPEND request
 * is valid. If so, it returns the requested states for each power level.
 *****************************************************************************/
int psci_validate_power_state(unsigned int power_state,
			      psci_power_state_t *state_info)
{
	/* Check SBZ bits in power state are zero */
	if (psci_check_power_state(power_state) != 0U)
		return PSCI_E_INVALID_PARAMS;

	assert(psci_plat_pm_ops->validate_power_state != NULL);

	/* Validate the power_state using platform pm_ops */
	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
}
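
/*
 * For reference, in the original (non-extended) power_state format defined
 * by the PSCI specification, bits[15:0] carry the StateID, bit[16] the
 * StateType (0 = standby, 1 = powerdown) and bits[25:24] the PowerLevel;
 * every other bit is SBZ, which is what psci_check_power_state() rejects
 * above. As an illustrative example, a powerdown request targeting power
 * level 1 with a StateID of 0 would be encoded as
 * (1U << 24) | (1U << 16), i.e. 0x01010000.
 */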

/******************************************************************************
 * This function retrieves the `psci_power_state_t` for system suspend from
 * the platform.
 *****************************************************************************/
void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
{
	/*
	 * Assert that the required pm_ops hook is implemented to ensure that
	 * the capability detected during psci_setup() is valid.
	 */
	assert(psci_plat_pm_ops->get_sys_suspend_power_state != NULL);

	/*
	 * Query the platform for the power_state required for system suspend
	 */
	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
}

/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and the current CPU is the last running CPU in the system.
 * Returns true if the current CPU is the last ON CPU, false otherwise.
 ******************************************************************************/
bool psci_is_last_on_cpu(void)
{
	unsigned int cpu_idx, my_idx = plat_my_core_pos();

	for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) {
		if (cpu_idx == my_idx) {
			assert(psci_get_aff_info_state() == AFF_STATE_ON);
			continue;
		}

		if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF) {
			VERBOSE("core=%u other than current core=%u %s\n",
				cpu_idx, my_idx, "running in the system");
			return false;
		}
	}

	return true;
}

/*******************************************************************************
 * This function verifies that all cores in the system have been turned ON.
 * Returns true if all CPUs are ON, false otherwise.
 ******************************************************************************/
static bool psci_are_all_cpus_on(void)
{
	unsigned int cpu_idx;

	for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) {
		if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_OFF) {
			return false;
		}
	}

	return true;
}

/*******************************************************************************
 * Routine to return the maximum power level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
static unsigned int get_power_on_target_pwrlvl(void)
{
	unsigned int pwrlvl;

	/*
	 * Assume that this cpu was suspended and retrieve its target power
	 * level. If it is invalid then it could only have been turned off
	 * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
	 * cpu can be turned off to.
	 */
	pwrlvl = psci_get_suspend_pwrlvl();
	if (pwrlvl == PSCI_INVALID_PWR_LVL)
		pwrlvl = PLAT_MAX_PWR_LVL;
	assert(pwrlvl < PSCI_INVALID_PWR_LVL);
	return pwrlvl;
}

/******************************************************************************
 * Helper function to update the requested local power state array. This array
 * does not store the requested state for the CPU power level. Hence an
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
					 unsigned int cpu_idx,
					 plat_local_state_t req_pwr_state)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);
	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
			(cpu_idx < psci_plat_core_count)) {
		psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
	}
}

/******************************************************************************
 * This function initializes the psci_req_local_pwr_states.
 *****************************************************************************/
void __init psci_init_req_local_pwr_states(void)
{
	/* Initialize the requested state of all non CPU power domains as OFF */
	unsigned int pwrlvl;
	unsigned int core;

	for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) {
		for (core = 0; core < psci_plat_core_count; core++) {
			psci_req_local_pwr_states[pwrlvl][core] =
				PLAT_MAX_OFF_STATE;
		}
	}
}

/******************************************************************************
 * Helper function to return a reference to an array containing the local power
 * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
 * array will be the number of cpu power domains of which this power domain is
 * an ancestor. These requested states will be used to determine a suitable
 * target state for this power domain during psci state coordination. An
 * assertion is added to prevent us from accessing the CPU power level.
 *****************************************************************************/
static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
							 unsigned int cpu_idx)
{
	assert(pwrlvl > PSCI_CPU_PWR_LVL);

	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
			(cpu_idx < psci_plat_core_count)) {
		return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
	} else {
		return NULL;
	}
}

/*
 * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
 * memory.
 *
 * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory and
 * is accessed by both cached and non-cached participants. To serve the common
 * minimum, perform a cache flush before read and after write so that
 * non-cached participants operate on the latest data in main memory.
 *
 * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
 * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
 * In both cases, no cache operations are required.
 */

/*
 * Retrieve the local state of a non-CPU power domain node from a non-cached
 * CPU, after any required cache maintenance operation.
 */
static plat_local_state_t get_non_cpu_pd_node_local_state(
		unsigned int parent_idx)
{
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
	return psci_non_cpu_pd_nodes[parent_idx].local_state;
}

/*
 * Update the local state of a non-CPU power domain node from a cached CPU;
 * perform any required cache maintenance operation afterwards.
 */
static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
		plat_local_state_t state)
{
	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
#if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
	flush_dcache_range(
			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
}

/******************************************************************************
 * Helper function to return the current local power state of each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
 * function will be called after a cpu is powered on to find the local state
 * each power domain has emerged from.
 *****************************************************************************/
void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
				      psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	plat_local_state_t *pd_state = target_state->pwr_domain_state;

	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local power state from node to state_info */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the higher levels to RUN */
	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}

/******************************************************************************
 * Helper function to set the target local power state that each power domain
 * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
 * enter. This function will be called after coordination of requested power
 * states has been done for each power level.
 *****************************************************************************/
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
					const psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	const plat_local_state_t *pd_state = target_state->pwr_domain_state;

	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);

	/*
	 * Need to flush as local_state might be accessed with Data Cache
	 * disabled during power on
	 */
	psci_flush_cpu_data(psci_svc_cpu_data.local_state);

	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local_state from state_info */
	for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}

/*******************************************************************************
 * PSCI helper function to get the parent nodes corresponding to a cpu_index.
 ******************************************************************************/
void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
				      unsigned int end_lvl,
				      unsigned int *node_index)
{
	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
	unsigned int i;
	unsigned int *node = node_index;

	for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) {
		*node = parent_node;
		node++;
		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
	}
}

/******************************************************************************
 * This function is invoked post CPU power up and initialization. It sets the
 * affinity info state, target power state and requested power state for the
 * current CPU and all its ancestor power domains to RUN.
 *****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
{
	unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* Reset the local_state to RUN for the non cpu power domains. */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
		set_non_cpu_pd_node_local_state(parent_idx,
				PSCI_LOCAL_STATE_RUN);
		psci_set_req_local_pwr_state(lvl,
					     cpu_idx,
					     PSCI_LOCAL_STATE_RUN);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the affinity info state to ON */
	psci_set_aff_info_state(AFF_STATE_ON);

	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
	psci_flush_cpu_data(psci_svc_cpu_data);
}

/******************************************************************************
 * This function is passed the local power states requested for each power
 * domain (state_info) between the current CPU domain and its ancestors until
 * the target power level (end_pwrlvl). It updates the array of requested power
 * states with this information.
 *
 * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
 * retrieves the states requested by all the cpus of which the power domain at
 * that level is an ancestor. It passes this information to the platform to
 * coordinate and return the target power state. If the target state for a level
 * is RUN then subsequent levels are not considered. At the CPU level, state
 * coordination is not required. Hence, the requested and the target states are
 * the same.
 *
 * The 'state_info' is updated with the target state for each level between the
 * CPU and the 'end_pwrlvl' and returned to the caller.
 *
 * This function will only be invoked with data cache enabled and while
 * powering down a core.
 *****************************************************************************/
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info)
{
	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
	unsigned int start_idx;
	unsigned int ncpus;
	plat_local_state_t target_state, *req_states;

	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/*
	 * For level 0, the requested state will be equivalent to the target
	 * state.
	 */
	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {

		/* First update the requested power state */
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);

		/* Get the requested power states for this power level */
		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
		req_states = psci_get_req_local_pwr_states(lvl, start_idx);

		/*
		 * Let the platform coordinate amongst the requested states at
		 * this power level and return the target local power state.
		 */
		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
		target_state = plat_get_target_pwr_state(lvl,
							 req_states,
							 ncpus);

		state_info->pwr_domain_state[lvl] = target_state;

		/* Break early if the negotiated target power state is RUN */
		if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0)
			break;

		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/*
	 * This is for cases when we break out of the above loop early because
	 * the target power state is RUN at a power level < end_pwrlvl.
	 * We update the requested power state from state_info and then
	 * set the target state as RUN.
	 */
	for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) {
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);
		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
	}

	/* Update the target state in the power domain nodes */
	psci_set_target_local_pwr_states(end_pwrlvl, state_info);
}
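
/*
 * Worked example (hypothetical dual-core cluster): suppose core 0 requests
 * cluster power down via CPU_SUSPEND while core 1 is still running. The
 * level 1 requested-states array handed to the platform is then
 * { OFF, RUN }, which a typical plat_get_target_pwr_state() implementation
 * coordinates to RUN. The loop above breaks early and core 0 powers down
 * alone while the cluster stays on. Only once core 1 also requests cluster
 * power down does the array become { OFF, OFF }, allowing the cluster itself
 * to be targeted.
 */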

/******************************************************************************
 * This function validates a suspend request by making sure that if a standby
 * state is requested then no power level is turned off and the highest power
 * level is placed in a standby/retention state.
 *
 * It also ensures that the state level X will enter is not shallower than the
 * state level X + 1 will enter.
 *
 * This validation will be enabled only for DEBUG builds as the platform is
 * expected to perform these validations as well.
 *****************************************************************************/
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state)
{
	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
	plat_local_state_t state;
	plat_local_state_type_t req_state_type, deepest_state_type;
	int i;

	/* Find the target suspend power level */
	target_lvl = psci_find_target_suspend_lvl(state_info);
	if (target_lvl == PSCI_INVALID_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* All power domain levels are in a RUN state to begin with */
	deepest_state_type = STATE_TYPE_RUN;

	for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		state = state_info->pwr_domain_state[i];
		req_state_type = find_local_state_type(state);

		/*
		 * While traversing from the highest power level to the lowest,
		 * the state requested for lower levels has to be the same or
		 * deeper i.e. equal to or greater than the state at the higher
		 * levels. If this condition is true, then the requested state
		 * becomes the deepest state encountered so far.
		 */
		if (req_state_type < deepest_state_type)
			return PSCI_E_INVALID_PARAMS;
		deepest_state_type = req_state_type;
	}

	/* Find the highest off power level */
	max_off_lvl = psci_find_max_off_lvl(state_info);

	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
	max_retn_lvl = PSCI_INVALID_PWR_LVL;
	if (target_lvl != max_off_lvl)
		max_retn_lvl = target_lvl;

	/*
	 * If this is not a request for a power down state then max off level
	 * has to be invalid and max retention level has to be a valid power
	 * level.
	 */
	if ((is_power_down_state == 0U) &&
			((max_off_lvl != PSCI_INVALID_PWR_LVL) ||
			 (max_retn_lvl == PSCI_INVALID_PWR_LVL)))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
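
/*
 * Illustrative example (again assuming PLAT_MAX_RET_STATE == 1 and
 * PLAT_MAX_OFF_STATE == 2): a state_info of { level0 = OFF, level1 = RETN }
 * passes the traversal above, since lower levels may be in states equal to
 * or deeper than their ancestors. A state_info of
 * { level0 = RETN, level1 = OFF } is rejected with PSCI_E_INVALID_PARAMS,
 * because level 0 would be shallower than level 1.
 */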

/******************************************************************************
 * This function finds the highest power level which will be powered down
 * amongst all the power levels specified in the 'state_info' structure
 *****************************************************************************/
unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_off(state_info->pwr_domain_state[i]) != 0)
			return (unsigned int) i;
	}

	return PSCI_INVALID_PWR_LVL;
}

/******************************************************************************
 * This function finds the level of the highest power domain which will be
 * placed in a low power state during a suspend operation.
 *****************************************************************************/
unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
{
	int i;

	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
		if (is_local_state_run(state_info->pwr_domain_state[i]) == 0)
			return (unsigned int) i;
	}

	return PSCI_INVALID_PWR_LVL;
}

/*******************************************************************************
 * This function is passed the highest level in the topology tree that the
 * operation should be applied to and a list of node indexes. It picks up locks
 * from the node index list in order of increasing power domain level in the
 * range specified.
 ******************************************************************************/
void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
				   const unsigned int *parent_nodes)
{
	unsigned int parent_idx;
	unsigned int level;

	/* No locking required for level 0. Hence start locking from level 1 */
	for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
		parent_idx = parent_nodes[level - 1U];
		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
	}
}

/*******************************************************************************
 * This function is passed the highest level in the topology tree that the
 * operation should be applied to and a list of node indexes. It releases the
 * locks in order of decreasing power domain level in the range specified.
 ******************************************************************************/
void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
				   const unsigned int *parent_nodes)
{
	unsigned int parent_idx;
	unsigned int level;

	/* Unlock top down. No unlocking required for level 0. */
	for (level = end_pwrlvl; level >= (PSCI_CPU_PWR_LVL + 1U); level--) {
		parent_idx = parent_nodes[level - 1U];
		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
	}
}

/*******************************************************************************
 * Simple routine to determine whether an mpidr is valid or not.
 ******************************************************************************/
int psci_validate_mpidr(u_register_t mpidr)
{
	if (plat_core_pos_by_mpidr(mpidr) < 0)
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}

/*******************************************************************************
 * This function determines the full entrypoint information for the requested
 * PSCI entrypoint on power on/resume and returns it.
 ******************************************************************************/
#ifdef __aarch64__
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr, sctlr;
	unsigned int daif, ee, mode;
	u_register_t ns_scr_el3 = read_scr_el3();
	u_register_t ns_sctlr_el1 = read_sctlr_el1();

	sctlr = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
		read_sctlr_el2() : ns_sctlr_el1;
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if ((sctlr & SCTLR_EE_BIT) != 0U) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	/*
	 * Figure out whether the cpu enters the non-secure address space
	 * in aarch32 or aarch64
	 */
	if ((ns_scr_el3 & SCR_RW_BIT) != 0U) {

		/*
		 * Check whether a Thumb entry point has been provided for an
		 * aarch64 EL
		 */
		if ((entrypoint & 0x1UL) != 0UL)
			return PSCI_E_INVALID_ADDRESS;

		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1;

		ep->spsr = SPSR_64((uint64_t)mode, MODE_SP_ELX,
				   DISABLE_ALL_EXCEPTIONS);
	} else {

		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
			MODE32_hyp : MODE32_svc;

		/*
		 * TODO: Choose async. exception bits if HYP mode is not
		 * implemented according to the values of SCR.{AW, FW} bits
		 */
		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;

		ep->spsr = SPSR_MODE32((uint64_t)mode, entrypoint & 0x1, ee,
				       daif);
	}

	return PSCI_E_SUCCESS;
}
#else /* !__aarch64__ */
static int psci_get_ns_ep_info(entry_point_info_t *ep,
			       uintptr_t entrypoint,
			       u_register_t context_id)
{
	u_register_t ep_attr;
	unsigned int aif, ee, mode;
	u_register_t scr = read_scr();
	u_register_t ns_sctlr, sctlr;

	/* Switch to non secure state */
	write_scr(scr | SCR_NS_BIT);
	isb();
	ns_sctlr = read_sctlr();

	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;

	/* Return to original state */
	write_scr(scr);
	isb();
	ee = 0;

	ep_attr = NON_SECURE | EP_ST_DISABLE;
	if (sctlr & SCTLR_EE_BIT) {
		ep_attr |= EP_EE_BIG;
		ee = 1;
	}
	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);

	ep->pc = entrypoint;
	zeromem(&ep->args, sizeof(ep->args));
	ep->args.arg0 = context_id;

	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;

	/*
	 * TODO: Choose async. exception bits if HYP mode is not
	 * implemented according to the values of SCR.{AW, FW} bits
	 */
	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;

	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);

	return PSCI_E_SUCCESS;
}
#endif /* __aarch64__ */

/*******************************************************************************
 * This function validates the entrypoint with the platform layer if the
 * appropriate pm_ops hook is exported by the platform and returns the
 * 'entry_point_info'.
 ******************************************************************************/
int psci_validate_entry_point(entry_point_info_t *ep,
			      uintptr_t entrypoint,
			      u_register_t context_id)
{
	int rc;

	/* Validate the entrypoint using platform psci_ops */
	if (psci_plat_pm_ops->validate_ns_entrypoint != NULL) {
		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
		if (rc != PSCI_E_SUCCESS)
			return PSCI_E_INVALID_ADDRESS;
	}

	/*
	 * Verify and derive the re-entry information for
	 * the non-secure world from the non-secure state from
	 * where this call originated.
	 */
	rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
	return rc;
}

/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses the node information and finds the highest power level powered
 * off and performs generic, architectural, platform setup and state management
 * to power on that power level and power levels below it.
 * e.g. For a cpu that's been powered on, it will call the platform specific
 * code to enable the gic cpu interface and for a cluster it will enable
 * coherency at the interconnect level in addition to gic cpu interface.
 ******************************************************************************/
void psci_warmboot_entrypoint(void)
{
	unsigned int end_pwrlvl;
	unsigned int cpu_idx = plat_my_core_pos();
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

	/*
	 * Verify that we have been explicitly turned ON or resumed from
	 * suspend.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
		ERROR("Unexpected affinity info state.\n");
		panic();
	}

	/*
	 * Get the maximum power domain level to traverse to after this cpu
	 * has been physically powered up.
	 */
	end_pwrlvl = get_power_on_target_pwrlvl();

	/* Get the parent nodes */
	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);

	/*
	 * This function acquires the lock corresponding to each power level so
	 * that by the time all locks are taken, the system topology is snapshot
	 * and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);

	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	plat_psci_stat_accounting_stop(&state_info);
#endif

	/*
	 * This CPU could be resuming from suspend or it could have just been
	 * turned on. To distinguish between these 2 cases, we examine the
	 * affinity state of the CPU:
	 *  - If the affinity state is ON_PENDING then it has just been
	 *    turned on.
	 *  - Else it is resuming from suspend.
	 *
	 * Depending on the type of warm reset identified, choose the right set
	 * of power management handlers and perform the generic, architecture
	 * and platform specific handling.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
		psci_cpu_on_finish(cpu_idx, &state_info);
	else
		psci_cpu_suspend_finish(cpu_idx, &state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domains which are ancestors of this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

#if ENABLE_PSCI_STAT
	/*
	 * Update PSCI stats.
	 * Caches are off when writing stats data on the power down path.
	 * Since caches are now enabled, it's necessary to do cache
	 * maintenance before reading that same data.
	 */
	psci_stats_update_pwr_up(end_pwrlvl, &state_info);
#endif

	/*
	 * This loop releases the lock corresponding to each power level
	 * in the reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
}

/*******************************************************************************
 * This function initializes the set of hooks that PSCI invokes as part of
 * power management operations. The power management hooks are expected to be
 * provided by the SPD, after it finishes all its initialization.
 ******************************************************************************/
void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
{
	assert(pm != NULL);
	psci_spd_pm = pm;

	if (pm->svc_migrate != NULL)
		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);

	if (pm->svc_migrate_info != NULL)
		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
				| define_psci_cap(PSCI_MIG_INFO_TYPE);
}
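
/*
 * Illustrative registration (modelled on how a dispatcher such as the TSPD
 * wires itself in; the handler names here are indicative only):
 *
 *	static const spd_pm_ops_t tspd_pm = {
 *		.svc_on = tspd_cpu_on_handler,
 *		.svc_off = tspd_cpu_off_handler,
 *		.svc_suspend = tspd_cpu_suspend_handler,
 *		.svc_on_finish = tspd_cpu_on_finish_handler,
 *		.svc_suspend_finish = tspd_cpu_suspend_finish_handler,
 *		.svc_migrate = NULL,
 *		.svc_migrate_info = tspd_cpu_migrate_info,
 *	};
 *
 *	psci_register_spd_pm_hook(&tspd_pm);
 *
 * A NULL svc_migrate keeps PSCI_MIG_AARCH64 out of psci_caps, while the
 * non-NULL svc_migrate_info advertises the MIGRATE_INFO queries.
 */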

/*******************************************************************************
 * This function invokes the migrate info hook in the spd_pm_ops. It performs
 * the necessary return value validation. If the Secure Payload is UP and
 * migrate capable, it returns the mpidr of the CPU on which the Secure payload
 * is resident through the mpidr parameter. Else the value of the parameter on
 * return is undefined.
 ******************************************************************************/
int psci_spd_migrate_info(u_register_t *mpidr)
{
	int rc;

	if ((psci_spd_pm == NULL) || (psci_spd_pm->svc_migrate_info == NULL))
		return PSCI_E_NOT_SUPPORTED;

	rc = psci_spd_pm->svc_migrate_info(mpidr);

	assert((rc == PSCI_TOS_UP_MIG_CAP) || (rc == PSCI_TOS_NOT_UP_MIG_CAP) ||
	       (rc == PSCI_TOS_NOT_PRESENT_MP) || (rc == PSCI_E_NOT_SUPPORTED));

	return rc;
}

/*******************************************************************************
 * This function prints the state of all power domains present in the
 * system
 ******************************************************************************/
void psci_print_power_domain_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
	unsigned int idx;
	plat_local_state_t state;
	plat_local_state_type_t state_type;

	/* This array maps to the PSCI_STATE_X definitions in psci.h */
	static const char * const psci_state_type_str[] = {
		"ON",
		"RETENTION",
		"OFF",
	};

	INFO("PSCI Power Domain Map:\n");
	for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - psci_plat_core_count);
							idx++) {
		state_type = find_local_state_type(
				psci_non_cpu_pd_nodes[idx].local_state);
		INFO("  Domain Node : Level %u, parent_node %u,"
				" State %s (0x%x)\n",
				psci_non_cpu_pd_nodes[idx].level,
				psci_non_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_non_cpu_pd_nodes[idx].local_state);
	}

	for (idx = 0; idx < psci_plat_core_count; idx++) {
		state = psci_get_cpu_local_state_by_idx(idx);
		state_type = find_local_state_type(state);
		INFO("  CPU Node : MPID 0x%llx, parent_node %u,"
				" State %s (0x%x)\n",
				(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
				psci_cpu_pd_nodes[idx].parent_node,
				psci_state_type_str[state_type],
				psci_get_cpu_local_state_by_idx(idx));
	}
#endif
}

/******************************************************************************
 * Return whether any secondaries were powered up with a CPU_ON call. A CPU
 * that has ever been powered up would have set its MPIDR value to something
 * other than PSCI_INVALID_MPIDR. Note that the MPIDR isn't reset back to
 * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is
 * meaningful only when called on the primary CPU during early boot.
 *****************************************************************************/
int psci_secondaries_brought_up(void)
{
	unsigned int idx, n_valid = 0U;

	for (idx = 0U; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
		if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
			n_valid++;
	}

	assert(n_valid > 0U);

	return (n_valid > 1U) ? 1 : 0;
}

/*******************************************************************************
 * Initiate the power down sequence by calling the power down operations
 * registered for this CPU.
 ******************************************************************************/
void psci_pwrdown_cpu(unsigned int power_level)
{
#if HW_ASSISTED_COHERENCY
	/*
	 * With hardware-assisted coherency, the CPU drivers only initiate the
	 * power down sequence, without performing cache-maintenance operations
	 * in software. Data caches remain enabled both before and after this
	 * call.
	 */
	prepare_cpu_pwr_dwn(power_level);
#else
	/*
	 * Without hardware-assisted coherency, the CPU drivers disable data
	 * caches, then perform cache-maintenance operations in software.
	 *
	 * This also calls prepare_cpu_pwr_dwn() to initiate the power down
	 * sequence, but that function will return with data caches disabled.
	 * We must ensure that the stack memory is flushed out to memory before
	 * we start popping from it again.
	 */
	psci_do_pwrdown_cache_maintenance(power_level);
#endif
}

/*******************************************************************************
 * This function invokes the callback 'stop_func()' with the 'mpidr' of each
 * online PE. The caller can pass a suitable method to stop a remote core.
 *
 * 'wait_ms' is the timeout value in milliseconds for the other cores to
 * transition to the power down state. Passing '0' makes it non-blocking.
 *
 * The function returns 'PSCI_E_DENIED' if some cores failed to stop within
 * the given timeout.
 ******************************************************************************/
int psci_stop_other_cores(unsigned int wait_ms,
				   void (*stop_func)(u_register_t mpidr))
{
	unsigned int idx, this_cpu_idx;

	this_cpu_idx = plat_my_core_pos();

	/* Invoke stop_func for each core */
	for (idx = 0U; idx < psci_plat_core_count; idx++) {
		/* Skip the current CPU */
		if (idx == this_cpu_idx) {
			continue;
		}

		/* Check if the CPU is ON */
		if (psci_get_aff_info_state_by_idx(idx) == AFF_STATE_ON) {
			(*stop_func)(psci_cpu_pd_nodes[idx].mpidr);
		}
	}

	/* Need to wait for the other cores to shut down */
	if (wait_ms != 0U) {
		while ((wait_ms-- != 0U) && (!psci_is_last_on_cpu())) {
			mdelay(1U);
		}

		if (!psci_is_last_on_cpu()) {
			WARN("Failed to stop all cores!\n");
			psci_print_power_domain_map();
			return PSCI_E_DENIED;
		}
	}

	return PSCI_E_SUCCESS;
}
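
/*
 * Usage sketch (hypothetical platform code): before entering a firmware
 * update or panic path, a platform could park its peers with an EL3 SGI and
 * give them 100 ms to reach the OFF state. PLAT_STOP_CORE_SGI and
 * plat_stop_core are illustrative names, not part of this file:
 *
 *	static void plat_stop_core(u_register_t mpidr)
 *	{
 *		plat_ic_raise_el3_sgi(PLAT_STOP_CORE_SGI, mpidr);
 *	}
 *
 *	if (psci_stop_other_cores(100U, plat_stop_core) != PSCI_E_SUCCESS) {
 *		WARN("Some cores are still running\n");
 *	}
 *
 * The only contract is that stop_func eventually drives each targeted core
 * into CPU_OFF.
 */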

/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and the current CPU is the last running CPU in the system.
 * Returns true if the current CPU is the last ON CPU, false otherwise.
 *
 * This API differs from psci_is_last_on_cpu as follows:
 *  1. PSCI states are locked
 ******************************************************************************/
bool psci_is_last_on_cpu_safe(void)
{
	unsigned int this_core = plat_my_core_pos();
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};

	psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);

	psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);

	if (!psci_is_last_on_cpu()) {
		psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
		return false;
	}

	psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);

	return true;
}

/*******************************************************************************
 * This function verifies that all cores in the system have been turned ON.
 * Returns true if all CPUs are ON, false otherwise.
 *
 * This API differs from psci_are_all_cpus_on as follows:
 *  1. PSCI states are locked
 ******************************************************************************/
bool psci_are_all_cpus_on_safe(void)
{
	unsigned int this_core = plat_my_core_pos();
	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};

	psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);

	psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);

	if (!psci_are_all_cpus_on()) {
		psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
		return false;
	}

	psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);

	return true;
}