xref: /rk3399_ARM-atf/lib/psci/psci_common.c (revision 04cf04c72d403e0c057505882fac9002d39d4102)
1 /*
2  * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <string.h>
9 
10 #include <arch.h>
11 #include <arch_features.h>
12 #include <arch_helpers.h>
13 #include <common/bl_common.h>
14 #include <common/debug.h>
15 #include <context.h>
16 #include <drivers/delay_timer.h>
17 #include <lib/cpus/cpu_ops.h>
18 #include <lib/el3_runtime/context_mgmt.h>
19 #include <lib/extensions/spe.h>
20 #include <lib/pmf/pmf.h>
21 #include <lib/runtime_instr.h>
22 #include <lib/utils.h>
23 #include <plat/common/platform.h>
24 
25 #include "psci_private.h"
26 
27 /*
28  * SPD power management operations, expected to be supplied by the registered
29  * SPD on successful SP initialization
30  */
31 const spd_pm_ops_t *psci_spd_pm;
32 
33 /*
34  * PSCI requested local power state map. This array is used to store the local
35  * power states requested by a CPU for power levels from level 1 to
36  * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
37  * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
38  * CPU are the same.
39  *
40  * During state coordination, the platform is passed an array containing the
41  * local states requested for a particular non cpu power domain by each cpu
42  * within the domain.
43  *
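 * The array is indexed as psci_req_local_pwr_states[pwrlvl - 1][cpu_idx],
 * i.e. entry [0][cpu_idx] holds the state requested by that cpu for its
 * level 1 ancestor power domain (see psci_set_req_local_pwr_state()).
 *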
44  * TODO: Dense packing of the requested states will cause cache thrashing
45  * when multiple power domains write to it. If we allocate the requested
46  * states at each power level in a cache-line aligned per-domain memory,
47  * the cache thrashing can be avoided.
48  */
49 static plat_local_state_t
50 	psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
51 
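/*
 * Number of CPU power domains described by the platform topology; populated
 * during PSCI setup.
 */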
52 unsigned int psci_plat_core_count;
53 
54 /*******************************************************************************
55  * Arrays that hold the platform's power domain tree information for state
56  * management of power domains.
57  * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
58  * which is an ancestor of a CPU power domain.
59  * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
60  ******************************************************************************/
61 non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
62 #if USE_COHERENT_MEM
63 __section(".tzfw_coherent_mem")
64 #endif
65 ;
66 
67 /* Lock for PSCI state coordination */
68 DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
69 
70 cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
71 
72 /*******************************************************************************
73  * Pointer to functions exported by the platform to complete power mgmt. ops
74  ******************************************************************************/
75 const plat_psci_ops_t *psci_plat_pm_ops;
76 
77 /******************************************************************************
78  * Check that the maximum power level supported by the platform makes sense
79  *****************************************************************************/
80 CASSERT((PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL) &&
81 	(PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL),
82 	assert_platform_max_pwrlvl_check);
83 
84 #if PSCI_OS_INIT_MODE
85 /*******************************************************************************
86  * The power state coordination mode used in CPU_SUSPEND.
87  * Defaults to platform-coordinated mode.
88  ******************************************************************************/
89 suspend_mode_t psci_suspend_mode = PLAT_COORD;
90 #endif
91 
92 /*
93  * The plat_local_state used by the platform is one of these types: RUN,
94  * RETENTION and OFF. The platform can define further sub-states for each type
95  * apart from RUN. This categorization is done to verify the sanity of the
96  * psci_power_state passed by the platform and to print debug information. The
97  * categorization is done on the basis of the following conditions:
98  *
99  * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
100  *
101  * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
102  *    STATE_TYPE_RETN.
103  *
104  * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
105  *    STATE_TYPE_OFF.
106  */
107 typedef enum plat_local_state_type {
108 	STATE_TYPE_RUN = 0,
109 	STATE_TYPE_RETN,
110 	STATE_TYPE_OFF
111 } plat_local_state_type_t;
112 
113 /* Function used to categorize plat_local_state. */
114 static plat_local_state_type_t find_local_state_type(plat_local_state_t state)
115 {
116 	if (state != 0U) {
117 		if (state > PLAT_MAX_RET_STATE) {
118 			return STATE_TYPE_OFF;
119 		} else {
120 			return STATE_TYPE_RETN;
121 		}
122 	} else {
123 		return STATE_TYPE_RUN;
124 	}
125 }
126 
127 /******************************************************************************
128  * Check that the maximum retention level supported by the platform is less
129  * than the maximum off level.
130  *****************************************************************************/
131 CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE,
132 		assert_platform_max_off_and_retn_state_check);
133 
134 /******************************************************************************
135  * This function ensures that the power state parameter in a CPU_SUSPEND request
136  * is valid. If so, it returns the requested states for each power level.
137  *****************************************************************************/
138 int psci_validate_power_state(unsigned int power_state,
139 			      psci_power_state_t *state_info)
140 {
141 	/* Check SBZ bits in power state are zero */
142 	if (psci_check_power_state(power_state) != 0U) {
143 		return PSCI_E_INVALID_PARAMS;
144 	}
145 	assert(psci_plat_pm_ops->validate_power_state != NULL);
146 
147 	/* Validate the power_state using platform pm_ops */
148 	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
149 }
150 
151 /******************************************************************************
152  * This function retrieves the `psci_power_state_t` for system suspend from
153  * the platform.
154  *****************************************************************************/
155 void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
156 {
157 	/*
158 	 * Assert that the required pm_ops hook is implemented to ensure that
159 	 * the capability detected during psci_setup() is valid.
160 	 */
161 	assert(psci_plat_pm_ops->get_sys_suspend_power_state != NULL);
162 
163 	/*
164 	 * Query the platform for the power_state required for system suspend
165 	 */
166 	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
167 }
168 
169 #if PSCI_OS_INIT_MODE
170 /*******************************************************************************
171  * This function verifies that all the other cores at the 'end_pwrlvl' have been
172  * idled and the current CPU is the last running CPU at the 'end_pwrlvl'.
173  * Returns true if the current CPU is the last running CPU at the 'end_pwrlvl',
174  * false otherwise.
175  ******************************************************************************/
176 static bool psci_is_last_cpu_to_idle_at_pwrlvl(unsigned int my_idx, unsigned int end_pwrlvl)
177 {
178 	unsigned int lvl;
179 	unsigned int parent_idx = 0;
180 	unsigned int cpu_start_idx, ncpus, cpu_idx;
181 	plat_local_state_t local_state;
182 
183 	if (end_pwrlvl == PSCI_CPU_PWR_LVL) {
184 		return true;
185 	}
186 
187 	parent_idx = psci_cpu_pd_nodes[my_idx].parent_node;
188 	for (lvl = PSCI_CPU_PWR_LVL + U(1); lvl < end_pwrlvl; lvl++) {
189 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
190 	}
191 
192 	cpu_start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
193 	ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
194 
195 	for (cpu_idx = cpu_start_idx; cpu_idx < cpu_start_idx + ncpus;
196 			cpu_idx++) {
197 		local_state = psci_get_cpu_local_state_by_idx(cpu_idx);
198 		if (cpu_idx == my_idx) {
199 			assert(is_local_state_run(local_state) != 0);
200 			continue;
201 		}
202 
203 		if (is_local_state_run(local_state) != 0) {
204 			return false;
205 		}
206 	}
207 
208 	return true;
209 }
210 #endif
211 
212 /*******************************************************************************
213  * This function verifies that all the other cores in the system have been
214  * turned OFF and the current CPU is the last running CPU in the system.
215  * Returns true if the current CPU is the last ON CPU, false otherwise.
216  ******************************************************************************/
217 bool psci_is_last_on_cpu(unsigned int my_idx)
218 {
219 	for (unsigned int cpu_idx = 0U; cpu_idx < psci_plat_core_count; cpu_idx++) {
220 		if (cpu_idx == my_idx) {
221 			assert(psci_get_aff_info_state() == AFF_STATE_ON);
222 			continue;
223 		}
224 
225 		if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF) {
226 			VERBOSE("core=%u other than current core=%u %s\n",
227 				cpu_idx, my_idx, "running in the system");
228 			return false;
229 		}
230 	}
231 
232 	return true;
233 }
234 
235 /*******************************************************************************
236  * This function verifies that all cores in the system have been turned ON.
237  * Returns true, if all CPUs are ON or false otherwise.
238  ******************************************************************************/
239 static bool psci_are_all_cpus_on(void)
240 {
241 	unsigned int cpu_idx;
242 
243 	for (cpu_idx = 0U; cpu_idx < psci_plat_core_count; cpu_idx++) {
244 		if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_OFF) {
245 			return false;
246 		}
247 	}
248 
249 	return true;
250 }
251 
252 /*******************************************************************************
253  * Counts the number of CPUs in the system that are currently in the ON or
254  * ON_PENDING state.
255  *
256  * @note This function does not acquire any power domain locks. It must only be
257  *       called in contexts where it is guaranteed that PSCI state transitions
258  *       are not concurrently happening, or where locks are already held.
259  *
260  * @return The number of CPUs currently in AFF_STATE_ON or AFF_STATE_ON_PENDING.
261  ******************************************************************************/
262 static unsigned int psci_num_cpus_running(void)
263 {
264 	unsigned int cpu_idx;
265 	unsigned int no_of_cpus = 0U;
266 	aff_info_state_t aff_state;
267 
268 	for (cpu_idx = 0U; cpu_idx < psci_plat_core_count; cpu_idx++) {
269 		aff_state = psci_get_aff_info_state_by_idx(cpu_idx);
270 		if (aff_state == AFF_STATE_ON ||
271 		    aff_state == AFF_STATE_ON_PENDING) {
272 			no_of_cpus++;
273 		}
274 	}
275 
276 	return no_of_cpus;
277 }
278 
279 /*******************************************************************************
280  * Routine to return the maximum power level to traverse to after a cpu has
281  * been physically powered up. It is expected to be called immediately after
282  * reset from assembler code.
283  ******************************************************************************/
284 static unsigned int get_power_on_target_pwrlvl(void)
285 {
286 	unsigned int pwrlvl;
287 
288 	/*
289 	 * Assume that this cpu was suspended and retrieve its target power
290 	 * level. If it wasn't, the cpu is off so this will be PLAT_MAX_PWR_LVL.
291 	 */
292 	pwrlvl = psci_get_suspend_pwrlvl();
293 	assert(pwrlvl < PSCI_INVALID_PWR_LVL);
294 	return pwrlvl;
295 }
296 
297 /******************************************************************************
298  * Helper function to update the requested local power state array. This array
299  * does not store the requested state for the CPU power level. Hence an
300  * assertion is added to prevent us from accessing the CPU power level.
301  *****************************************************************************/
302 static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
303 					 unsigned int cpu_idx,
304 					 plat_local_state_t req_pwr_state)
305 {
306 	assert(pwrlvl > PSCI_CPU_PWR_LVL);
307 	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
308 			(cpu_idx < psci_plat_core_count)) {
309 		psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
310 	}
311 }
312 
313 /******************************************************************************
314  * This function initializes the psci_req_local_pwr_states.
315  *****************************************************************************/
316 void __init psci_init_req_local_pwr_states(void)
317 {
318 	/* Initialize the requested state of all non CPU power domains as OFF */
319 	unsigned int pwrlvl;
320 	unsigned int core;
321 
322 	for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) {
323 		for (core = 0; core < psci_plat_core_count; core++) {
324 			psci_req_local_pwr_states[pwrlvl][core] =
325 				PLAT_MAX_OFF_STATE;
326 		}
327 	}
328 }
329 
330 /******************************************************************************
331  * Helper function to return a reference to an array containing the local power
332  * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
333  * array will be the number of cpu power domains of which this power domain is
334  * an ancestor. These requested states will be used to determine a suitable
335  * target state for this power domain during psci state coordination. An
336  * assertion is added to prevent us from accessing the CPU power level.
337  *****************************************************************************/
338 static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
339 							 unsigned int cpu_idx)
340 {
341 	assert(pwrlvl > PSCI_CPU_PWR_LVL);
342 
343 	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
344 			(cpu_idx < psci_plat_core_count)) {
345 		return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
346 	} else {
347 		return NULL;
348 	}
349 }
350 
351 #if PSCI_OS_INIT_MODE
352 /******************************************************************************
353  * Helper function to save a copy of the psci_req_local_pwr_states (prev) for a
354  * CPU (cpu_idx), and update psci_req_local_pwr_states with the new requested
355  * local power states (state_info).
356  *****************************************************************************/
357 void psci_update_req_local_pwr_states(unsigned int end_pwrlvl,
358 				      unsigned int cpu_idx,
359 				      psci_power_state_t *state_info,
360 				      plat_local_state_t *prev)
361 {
362 	unsigned int lvl;
363 #ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL
364 	unsigned int max_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL;
365 #else
366 	unsigned int max_pwrlvl = PLAT_MAX_PWR_LVL;
367 #endif
368 	plat_local_state_t req_state;
369 
370 	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= max_pwrlvl; lvl++) {
371 		/* Save the previous requested local power state */
372 		prev[lvl - 1U] = *psci_get_req_local_pwr_states(lvl, cpu_idx);
373 
374 		/* Update the new requested local power state */
375 		if (lvl <= end_pwrlvl) {
376 			req_state = state_info->pwr_domain_state[lvl];
377 		} else {
378 			req_state = state_info->pwr_domain_state[end_pwrlvl];
379 		}
380 		psci_set_req_local_pwr_state(lvl, cpu_idx, req_state);
381 	}
382 }
383 
384 /******************************************************************************
385  * Helper function to restore the previously saved requested local power states
386  * (prev) for a CPU (cpu_idx) to psci_req_local_pwr_states.
387  *****************************************************************************/
388 void psci_restore_req_local_pwr_states(unsigned int cpu_idx,
389 				       plat_local_state_t *prev)
390 {
391 	unsigned int lvl;
392 #ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL
393 	unsigned int max_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL;
394 #else
395 	unsigned int max_pwrlvl = PLAT_MAX_PWR_LVL;
396 #endif
397 
398 	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= max_pwrlvl; lvl++) {
399 		/* Restore the previous requested local power state */
400 		psci_set_req_local_pwr_state(lvl, cpu_idx, prev[lvl - 1U]);
401 	}
402 }
403 #endif
404 
405 /*
406  * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
407  * memory.
408  *
409  * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
410  * where it is accessed by both cached and non-cached participants. To serve the
411  * common minimum, a cache flush is performed before reads and after writes so
412  * that non-cached participants operate on the latest data in main memory.
413  *
414  * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
415  * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
416  * In both cases, no cache operations are required.
417  */
418 
419 /*
420  * Retrieve local state of non-CPU power domain node from a non-cached CPU,
421  * after any required cache maintenance operation.
422  */
423 static plat_local_state_t get_non_cpu_pd_node_local_state(
424 		unsigned int parent_idx)
425 {
426 #if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
427 	flush_dcache_range(
428 			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
429 			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
430 #endif
431 	return psci_non_cpu_pd_nodes[parent_idx].local_state;
432 }
433 
434 /*
435  * Update local state of non-CPU power domain node from a cached CPU; perform
436  * any required cache maintenance operation afterwards.
437  */
438 static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
439 		plat_local_state_t state)
440 {
441 	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
442 #if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
443 	flush_dcache_range(
444 			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
445 			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
446 #endif
447 }
448 
449 /******************************************************************************
450  * Helper function to return the current local power state of each power domain
451  * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
452  * function will be called after a cpu is powered on to find the local state
453  * each power domain has emerged from.
454  *****************************************************************************/
455 void psci_get_target_local_pwr_states(unsigned int cpu_idx, unsigned int end_pwrlvl,
456 				      psci_power_state_t *target_state)
457 {
458 	unsigned int parent_idx, lvl;
459 	plat_local_state_t *pd_state = target_state->pwr_domain_state;
460 
461 	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
462 	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
463 
464 	/* Copy the local power state from node to state_info */
465 	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
466 		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
467 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
468 	}
469 
470 	/* Set the higher levels to RUN */
471 	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
472 		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
473 	}
474 }
475 
476 /******************************************************************************
477  * Helper function to set the target local power state that each power domain
478  * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
479  * enter. This function will be called after coordination of requested power
480  * states has been done for each power level.
481  *****************************************************************************/
482 void psci_set_target_local_pwr_states(unsigned int cpu_idx, unsigned int end_pwrlvl,
483 				      const psci_power_state_t *target_state)
484 {
485 	unsigned int parent_idx, lvl;
486 	const plat_local_state_t *pd_state = target_state->pwr_domain_state;
487 
488 	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
489 
490 	/*
491 	 * Need to flush as local_state might be accessed with Data Cache
492 	 * disabled during power on
493 	 */
494 	psci_flush_cpu_data(psci_svc_cpu_data.local_state);
495 
496 	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
497 
498 	/* Copy the local_state from state_info */
499 	for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
500 		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
501 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
502 	}
503 }
504 
505 /*******************************************************************************
506  * PSCI helper function to get the parent nodes corresponding to a cpu_index.
507  ******************************************************************************/
508 void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
509 				      unsigned int end_lvl,
510 				      unsigned int *node_index)
511 {
512 	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
513 	unsigned int i;
514 	unsigned int *node = node_index;
515 
516 	for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) {
517 		*node = parent_node;
518 		node++;
519 		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
520 	}
521 }
522 
523 /******************************************************************************
524  * This function is invoked post CPU power up and initialization. It sets the
525  * affinity info state, target power state and requested power state for the
526  * current CPU and all its ancestor power domains to RUN.
527  *****************************************************************************/
528 void psci_set_pwr_domains_to_run(unsigned int cpu_idx, unsigned int end_pwrlvl)
529 {
530 	unsigned int parent_idx, lvl;
531 	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
532 
533 	/* Reset the local_state to RUN for the non cpu power domains. */
534 	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
535 		set_non_cpu_pd_node_local_state(parent_idx,
536 				PSCI_LOCAL_STATE_RUN);
537 		psci_set_req_local_pwr_state(lvl,
538 					     cpu_idx,
539 					     PSCI_LOCAL_STATE_RUN);
540 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
541 	}
542 
543 	/* Set the affinity info state to ON */
544 	psci_set_aff_info_state(AFF_STATE_ON);
545 
546 	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
547 	psci_flush_cpu_data(psci_svc_cpu_data);
548 }
549 
550 /******************************************************************************
551  * This function is used in platform-coordinated mode.
552  *
553  * This function is passed the local power states requested for each power
554  * domain (state_info) between the current CPU domain and its ancestors until
555  * the target power level (end_pwrlvl). It updates the array of requested power
556  * states with this information.
557  *
558  * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
559  * retrieves the states requested by all the cpus of which the power domain at
560  * that level is an ancestor. It passes this information to the platform to
561  * coordinate and return the target power state. If the target state for a level
562  * is RUN then subsequent levels are not considered. At the CPU level, state
563  * coordination is not required. Hence, the requested and the target states are
564  * the same.
565  *
566  * The 'state_info' is updated with the target state for each level between the
567  * CPU and the 'end_pwrlvl' and returned to the caller.
568  *
569  * This function will only be invoked with data cache enabled and while
570  * powering down a core.
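 *
 * For example, on a platform where the coordinated target is the shallowest
 * of the requested states, if one cpu requests its cluster to be turned OFF
 * while another cpu in the same cluster is still RUN, the cluster-level
 * target is downgraded to RUN and only the caller's CPU-level state takes
 * effect.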
571  *****************************************************************************/
572 void psci_do_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
573 				psci_power_state_t *state_info)
574 {
575 	unsigned int lvl, parent_idx;
576 	unsigned int start_idx;
577 	unsigned int ncpus;
578 	plat_local_state_t target_state;
579 
580 	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
581 	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
582 
583 	/* For level 0, the requested state will be equivalent
584 	   to target state */
585 	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
586 
587 		/* First update the requested power state */
588 		psci_set_req_local_pwr_state(lvl, cpu_idx,
589 					     state_info->pwr_domain_state[lvl]);
590 
591 		/* Get the requested power states for this power level */
592 		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
593 		plat_local_state_t const *req_states = psci_get_req_local_pwr_states(lvl,
594 										start_idx);
595 
596 		/*
597 		 * Let the platform coordinate amongst the requested states at
598 		 * this power level and return the target local power state.
599 		 */
600 		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
601 		target_state = plat_get_target_pwr_state(lvl,
602 							 req_states,
603 							 ncpus);
604 
605 		state_info->pwr_domain_state[lvl] = target_state;
606 
607 		/* Break early if the negotiated target power state is RUN */
608 		if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0) {
609 			break;
610 		}
611 
612 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
613 	}
614 
615 	/*
616 	 * This is for cases when we break out of the above loop early because
617 	 * the target power state is RUN at a power level < end_pwrlvl.
618 	 * We update the requested power state from state_info and then
619 	 * set the target state as RUN.
620 	 */
621 	for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) {
622 		psci_set_req_local_pwr_state(lvl, cpu_idx,
623 					     state_info->pwr_domain_state[lvl]);
624 		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
625 
626 	}
627 }
628 
629 #if PSCI_OS_INIT_MODE
630 /******************************************************************************
631  * This function is used in OS-initiated mode.
632  *
633  * This function is passed the local power states requested for each power
634  * domain (state_info) between the current CPU domain and its ancestors until
635  * the target power level (end_pwrlvl), and ensures the requested power states
636  * are valid. It updates the array of requested power states with this
637  * information.
638  *
639  * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
640  * retrieves the states requested by all the cpus of which the power domain at
641  * that level is an ancestor. It passes this information to the platform to
642  * coordinate and return the target power state. If the requested state does
643  * not match the target state, the request is denied.
644  *
645  * The 'state_info' is not modified.
646  *
647  * This function will only be invoked with data cache enabled and while
648  * powering down a core.
649  *****************************************************************************/
650 int psci_validate_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
651 				     psci_power_state_t *state_info)
652 {
653 	int rc = PSCI_E_SUCCESS;
654 	unsigned int lvl, parent_idx;
655 	unsigned int start_idx;
656 	unsigned int ncpus;
657 	plat_local_state_t target_state, *req_states;
658 	plat_local_state_t prev[PLAT_MAX_PWR_LVL];
659 
660 	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
661 	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
662 
663 	/*
664 	 * Save a copy of the previous requested local power states and update
665 	 * the new requested local power states.
666 	 */
667 	psci_update_req_local_pwr_states(end_pwrlvl, cpu_idx, state_info, prev);
668 
669 	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
670 		/* Get the requested power states for this power level */
671 		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
672 		req_states = psci_get_req_local_pwr_states(lvl, start_idx);
673 
674 		/*
675 		 * Let the platform coordinate amongst the requested states at
676 		 * this power level and return the target local power state.
677 		 */
678 		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
679 		target_state = plat_get_target_pwr_state(lvl,
680 							 req_states,
681 							 ncpus);
682 
683 		/*
684 		 * Verify that the requested power state matches the target
685 		 * local power state.
686 		 */
687 		if (state_info->pwr_domain_state[lvl] != target_state) {
688 			if (target_state == PSCI_LOCAL_STATE_RUN) {
689 				rc = PSCI_E_DENIED;
690 			} else {
691 				rc = PSCI_E_INVALID_PARAMS;
692 			}
693 			goto exit;
694 		}
695 
696 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
697 	}
698 
699 	/*
700 	 * Verify that the current core is the last running core at the
701 	 * specified power level.
702 	 */
703 	lvl = state_info->last_at_pwrlvl;
704 	if (!psci_is_last_cpu_to_idle_at_pwrlvl(cpu_idx, lvl)) {
705 		rc = PSCI_E_DENIED;
706 	}
707 
708 exit:
709 	if (rc != PSCI_E_SUCCESS) {
710 		/* Restore the previous requested local power states. */
711 		psci_restore_req_local_pwr_states(cpu_idx, prev);
712 		return rc;
713 	}
714 
715 	return rc;
716 }
717 #endif
718 
719 /******************************************************************************
720  * This function validates a suspend request by making sure that if a standby
721  * state is requested then no power level is turned off and the highest power
722  * level is placed in a standby/retention state.
723  *
724  * It also ensures that the state level X will enter is not shallower than the
725  * state level X + 1 will enter.
726  *
727  * This validation will be enabled only for DEBUG builds as the platform is
728  * expected to perform these validations as well.
729  *****************************************************************************/
730 int psci_validate_suspend_req(const psci_power_state_t *state_info,
731 			      unsigned int is_power_down_state)
732 {
733 	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
734 	plat_local_state_t state;
735 	plat_local_state_type_t req_state_type, deepest_state_type;
736 	int i;
737 
738 	/* Find the target suspend power level */
739 	target_lvl = psci_find_target_suspend_lvl(state_info);
740 	if (target_lvl == PSCI_INVALID_PWR_LVL) {
741 		return PSCI_E_INVALID_PARAMS;
742 	}
743 
744 	/* All power domain levels are in a RUN state to begin with */
745 	deepest_state_type = STATE_TYPE_RUN;
746 
747 	for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) {
748 		state = state_info->pwr_domain_state[i];
749 		req_state_type = find_local_state_type(state);
750 
751 		/*
752 		 * While traversing from the highest power level to the lowest,
753 		 * the state requested for lower levels has to be the same or
754 		 * deeper i.e. equal to or greater than the state at the higher
755 		 * levels. If this condition is true, then the requested state
756 		 * becomes the deepest state encountered so far.
757 		 */
758 		if (req_state_type < deepest_state_type) {
759 			return PSCI_E_INVALID_PARAMS;
760 		}
761 		deepest_state_type = req_state_type;
762 	}
763 
764 	/* Find the highest off power level */
765 	max_off_lvl = psci_find_max_off_lvl(state_info);
766 
767 	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
768 	max_retn_lvl = PSCI_INVALID_PWR_LVL;
769 	if (target_lvl != max_off_lvl) {
770 		max_retn_lvl = target_lvl;
771 	}
772 
773 	/*
774 	 * If this is not a request for a power down state then max off level
775 	 * has to be invalid and max retention level has to be a valid power
776 	 * level.
777 	 */
778 	if ((is_power_down_state == 0U) &&
779 			((max_off_lvl != PSCI_INVALID_PWR_LVL) ||
780 			 (max_retn_lvl == PSCI_INVALID_PWR_LVL))) {
781 		return PSCI_E_INVALID_PARAMS;
782 	}
783 
784 	return PSCI_E_SUCCESS;
785 }
786 
787 /******************************************************************************
788  * This function finds the highest power level which will be powered down
789  * amongst all the power levels specified in the 'state_info' structure
790  *****************************************************************************/
791 unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
792 {
793 	int i;
794 
795 	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
796 		if (is_local_state_off(state_info->pwr_domain_state[i]) != 0) {
797 			return (unsigned int) i;
798 		}
799 	}
800 
801 	return PSCI_INVALID_PWR_LVL;
802 }
803 
804 /******************************************************************************
805  * This function finds the level of the highest power domain which will be
806  * placed in a low power state during a suspend operation.
807  *****************************************************************************/
808 unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
809 {
810 	int i;
811 
812 	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
813 		if (is_local_state_run(state_info->pwr_domain_state[i]) == 0) {
814 			return (unsigned int) i;
815 		}
816 	}
817 
818 	return PSCI_INVALID_PWR_LVL;
819 }
820 
821 /*******************************************************************************
822  * This function is passed the highest level in the topology tree that the
823  * operation should be applied to and a list of node indexes. It picks up locks
824  * from the node index list in order of increasing power domain level in the
825  * range specified.
826  ******************************************************************************/
827 void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
828 				   const unsigned int *parent_nodes)
829 {
830 	unsigned int parent_idx;
831 	unsigned int level;
832 
833 	/* No locking required for level 0. Hence start locking from level 1 */
834 	for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
835 		parent_idx = parent_nodes[level - 1U];
836 		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
837 	}
838 }
839 
840 /*******************************************************************************
841  * This function is passed the highest level in the topology tree that the
842  * operation should be applied to and a list of node indexes. It releases the
843  * locks in order of decreasing power domain level in the range specified.
844  ******************************************************************************/
845 void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
846 				   const unsigned int *parent_nodes)
847 {
848 	unsigned int parent_idx;
849 	unsigned int level;
850 
851 	/* Unlock top down. No unlocking required for level 0. */
852 	for (level = end_pwrlvl; level >= (PSCI_CPU_PWR_LVL + 1U); level--) {
853 		parent_idx = parent_nodes[level - 1U];
854 		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
855 	}
856 }
857 
858 /*******************************************************************************
859  * This function determines the full entrypoint information for the requested
860  * PSCI entrypoint on power on/resume and returns it.
861  ******************************************************************************/
862 #ifdef __aarch64__
863 static int psci_get_ns_ep_info(entry_point_info_t *ep,
864 			       uintptr_t entrypoint,
865 			       u_register_t context_id)
866 {
867 	u_register_t ep_attr, sctlr;
868 	unsigned int daif, ee, mode;
869 	u_register_t ns_scr_el3 = read_scr_el3();
870 	u_register_t ns_sctlr_el1 = read_sctlr_el1();
871 
872 	sctlr = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
873 		read_sctlr_el2() : ns_sctlr_el1;
874 	ee = 0;
875 
876 	ep_attr = NON_SECURE | EP_ST_DISABLE;
877 	if ((sctlr & SCTLR_EE_BIT) != 0U) {
878 		ep_attr |= EP_EE_BIG;
879 		ee = 1;
880 	}
881 	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
882 
883 	ep->pc = entrypoint;
884 	zeromem(&ep->args, sizeof(ep->args));
885 	ep->args.arg0 = context_id;
886 
887 	/*
888 	 * Figure out whether the cpu enters the non-secure address space
889 	 * in aarch32 or aarch64
890 	 */
891 	if ((ns_scr_el3 & SCR_RW_BIT) != 0U) {
892 
893 		/*
894 		 * Check whether a Thumb entry point has been provided for an
895 		 * aarch64 EL
896 		 */
897 		if ((entrypoint & 0x1UL) != 0UL) {
898 			return PSCI_E_INVALID_ADDRESS;
899 		}
900 
901 		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1;
902 
903 		ep->spsr = SPSR_64((uint64_t)mode, MODE_SP_ELX,
904 				   DISABLE_ALL_EXCEPTIONS);
905 	} else {
906 
907 		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
908 			MODE32_hyp : MODE32_svc;
909 
910 		/*
911 		 * TODO: Choose async. exception bits if HYP mode is not
912 		 * implemented according to the values of SCR.{AW, FW} bits
913 		 */
914 		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
915 
916 		ep->spsr = SPSR_MODE32((uint64_t)mode, entrypoint & 0x1, ee,
917 				       daif);
918 	}
919 
920 	return PSCI_E_SUCCESS;
921 }
922 #else /* !__aarch64__ */
923 static int psci_get_ns_ep_info(entry_point_info_t *ep,
924 			       uintptr_t entrypoint,
925 			       u_register_t context_id)
926 {
927 	u_register_t ep_attr;
928 	unsigned int aif, ee, mode;
929 	u_register_t scr = read_scr();
930 	u_register_t ns_sctlr, sctlr;
931 
932 	/* Switch to non secure state */
933 	write_scr(scr | SCR_NS_BIT);
934 	isb();
935 	ns_sctlr = read_sctlr();
936 
937 	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;
938 
939 	/* Return to original state */
940 	write_scr(scr);
941 	isb();
942 	ee = 0;
943 
944 	ep_attr = NON_SECURE | EP_ST_DISABLE;
945 	if (sctlr & SCTLR_EE_BIT) {
946 		ep_attr |= EP_EE_BIG;
947 		ee = 1;
948 	}
949 	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
950 
951 	ep->pc = entrypoint;
952 	zeromem(&ep->args, sizeof(ep->args));
953 	ep->args.arg0 = context_id;
954 
955 	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
956 
957 	/*
958 	 * TODO: Choose async. exception bits if HYP mode is not
959 	 * implemented according to the values of SCR.{AW, FW} bits
960 	 */
961 	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;
962 
963 	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);
964 
965 	return PSCI_E_SUCCESS;
966 }
967 
968 #endif /* __aarch64__ */
969 
970 /*******************************************************************************
971  * This function validates the entrypoint with the platform layer if the
972  * appropriate pm_ops hook is exported by the platform and returns the
973  * 'entry_point_info'.
974  ******************************************************************************/
975 int psci_validate_entry_point(entry_point_info_t *ep,
976 			      uintptr_t entrypoint,
977 			      u_register_t context_id)
978 {
979 	int rc;
980 
981 	/* Validate the entrypoint using platform psci_ops */
982 	if (psci_plat_pm_ops->validate_ns_entrypoint != NULL) {
983 		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
984 		if (rc != PSCI_E_SUCCESS) {
985 			return PSCI_E_INVALID_ADDRESS;
986 		}
987 	}
988 
989 	/*
990 	 * Verify and derive the re-entry information for
991 	 * the non-secure world from the non-secure state from
992 	 * where this call originated.
993 	 */
994 	rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
995 	return rc;
996 }
997 
998 /*******************************************************************************
999  * Generic handler which is called when a cpu is physically powered on. It
1000  * traverses the node information and finds the highest power level powered
1001  * off and performs generic, architectural, platform setup and state management
1002  * to power on that power level and power levels below it.
1003  * e.g. for a cpu that has been powered on, it will call the platform specific
1004  * code to enable the gic cpu interface; for a cluster, it will enable
1005  * coherency at the interconnect level in addition to the gic cpu interface.
1006  ******************************************************************************/
1007 void psci_warmboot_entrypoint(void)
1008 {
1009 	unsigned int end_pwrlvl;
1010 	unsigned int cpu_idx = plat_my_core_pos();
1011 	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
1012 	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
1013 
1014 #if FEATURE_DETECTION
1015 	/* Detect if features enabled during compilation are supported by PE. */
1016 	detect_arch_features(cpu_idx);
1017 #endif /* FEATURE_DETECTION */
1018 
1019 	/* Init registers that never change for the lifetime of TF-A */
1020 	cm_manage_extensions_el3(cpu_idx);
1021 
1022 	/*
1023 	 * Verify that we have been explicitly turned ON or resumed from
1024 	 * suspend.
1025 	 */
1026 	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
1027 		ERROR("Unexpected affinity info state.\n");
1028 		panic();
1029 	}
1030 
1031 	/*
1032 	 * Get the maximum power domain level to traverse to after this cpu
1033 	 * has been physically powered up.
1034 	 */
1035 	end_pwrlvl = get_power_on_target_pwrlvl();
1036 
1037 	/* Get the parent nodes */
1038 	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
1039 
1040 	/*
1041 	 * This function acquires the lock corresponding to each power level so
1042 	 * that by the time all locks are taken, a snapshot of the system topology
1043 	 * has been taken and state management can be done safely.
1044 	 */
1045 	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
1046 
1047 	psci_get_target_local_pwr_states(cpu_idx, end_pwrlvl, &state_info);
1048 
1049 #if ENABLE_PSCI_STAT
1050 	plat_psci_stat_accounting_stop(&state_info);
1051 #endif
1052 
1053 	/*
1054 	 * This CPU could be resuming from suspend or it could have just been
1055 	 * turned on. To distinguish between these 2 cases, we examine the
1056 	 * affinity state of the CPU:
1057 	 *  - If the affinity state is ON_PENDING then it has just been
1058 	 *    turned on.
1059 	 *  - Else it is resuming from suspend.
1060 	 *
1061 	 * Depending on the type of warm reset identified, choose the right set
1062 	 * of power management handlers and perform the generic, architecture-
1063 	 * and platform-specific handling.
1064 	 */
1065 	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING) {
1066 		psci_cpu_on_finish(cpu_idx, &state_info);
1067 	} else {
1068 		unsigned int max_off_lvl = psci_find_max_off_lvl(&state_info);
1069 
1070 		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
1071 		psci_cpu_suspend_to_powerdown_finish(cpu_idx, max_off_lvl, &state_info, false);
1072 	}
1073 
1074 	/*
1075 	 * Caches and (importantly) coherency are on so we can rely on seeing
1076 	 * whatever the primary gave us without explicit cache maintenance
1077 	 */
1078 	entry_point_info_t *ep = get_cpu_data(warmboot_ep_info);
1079 	cm_init_my_context(ep);
1080 
1081 	/*
1082 	 * Generic management: Now we just need to retrieve the
1083 	 * information that we had stashed away during the cpu_on
1084 	 * call to set this cpu on its way.
1085 	 */
1086 	cm_prepare_el3_exit_ns();
1087 
1088 	/*
1089 	 * Set the requested and target state of this CPU and all the higher
1090 	 * power domains which are ancestors of this CPU to run.
1091 	 */
1092 	psci_set_pwr_domains_to_run(cpu_idx, end_pwrlvl);
1093 
1094 #if ENABLE_PSCI_STAT
1095 	psci_stats_update_pwr_up(cpu_idx, end_pwrlvl, &state_info);
1096 #endif
1097 
1098 	/*
1099 	 * This loop releases the lock corresponding to each power level
1100 	 * in the reverse order to which they were acquired.
1101 	 */
1102 	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
1103 }
1104 
1105 /*******************************************************************************
1106  * This function initializes the set of hooks that PSCI invokes as part of power
1107  * management operations. The power management hooks are expected to be provided
1108  * by the SPD after it finishes all its initialization.
1109  ******************************************************************************/
1110 void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
1111 {
1112 	assert(pm != NULL);
1113 	psci_spd_pm = pm;
1114 
1115 	if (pm->svc_migrate != NULL) {
1116 		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
1117 	}
1118 
1119 	if (pm->svc_migrate_info != NULL) {
1120 		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
1121 				| define_psci_cap(PSCI_MIG_INFO_TYPE);
1122 	}
1123 }
1124 
1125 /*******************************************************************************
1126  * This function invokes the migrate info hook in the spd_pm_ops. It performs
1127  * the necessary return value validation. If the Secure Payload is UP and
1128  * migrate capable, it returns the mpidr of the CPU on which the Secure payload
1129  * is resident through the mpidr parameter. Else the value of the parameter on
1130  * return is undefined.
1131  ******************************************************************************/
1132 int psci_spd_migrate_info(u_register_t *mpidr)
1133 {
1134 	int rc;
1135 
1136 	if ((psci_spd_pm == NULL) || (psci_spd_pm->svc_migrate_info == NULL)) {
1137 		return PSCI_E_NOT_SUPPORTED;
1138 	}
1139 
1140 	rc = psci_spd_pm->svc_migrate_info(mpidr);
1141 
1142 	assert((rc == PSCI_TOS_UP_MIG_CAP) || (rc == PSCI_TOS_NOT_UP_MIG_CAP) ||
1143 	       (rc == PSCI_TOS_NOT_PRESENT_MP) || (rc == PSCI_E_NOT_SUPPORTED));
1144 
1145 	return rc;
1146 }
1147 
1148 
1149 /*******************************************************************************
1150  * This function prints the state of all power domains present in the
1151  * system
1152  ******************************************************************************/
1153 void psci_print_power_domain_map(void)
1154 {
1155 #if LOG_LEVEL >= LOG_LEVEL_INFO
1156 	unsigned int idx;
1157 	plat_local_state_t state;
1158 	plat_local_state_type_t state_type;
1159 
1160 	/* This array maps to the PSCI_STATE_X definitions in psci.h */
1161 	static const char * const psci_state_type_str[] = {
1162 		"ON",
1163 		"RETENTION",
1164 		"OFF",
1165 	};
1166 
1167 	INFO("PSCI Power Domain Map:\n");
1168 	for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - psci_plat_core_count);
1169 							idx++) {
1170 		state_type = find_local_state_type(
1171 				psci_non_cpu_pd_nodes[idx].local_state);
1172 		INFO("  Domain Node : Level %u, parent_node %u,"
1173 				" State %s (0x%x)\n",
1174 				psci_non_cpu_pd_nodes[idx].level,
1175 				psci_non_cpu_pd_nodes[idx].parent_node,
1176 				psci_state_type_str[state_type],
1177 				psci_non_cpu_pd_nodes[idx].local_state);
1178 	}
1179 
1180 	for (idx = 0; idx < psci_plat_core_count; idx++) {
1181 		state = psci_get_cpu_local_state_by_idx(idx);
1182 		state_type = find_local_state_type(state);
1183 		INFO("  CPU Node : MPID 0x%llx, parent_node %u,"
1184 				" State %s (0x%x)\n",
1185 				(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
1186 				psci_cpu_pd_nodes[idx].parent_node,
1187 				psci_state_type_str[state_type],
1188 				psci_get_cpu_local_state_by_idx(idx));
1189 	}
1190 #endif
1191 }
1192 
1193 /******************************************************************************
1194  * Return whether any secondaries were powered up with a CPU_ON call. A CPU that
1195  * has ever been powered up would have set its MPIDR value to something other
1196  * than PSCI_INVALID_MPIDR. Note that the MPIDR isn't reset back to
1197  * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is
1198  * meaningful only when called on the primary CPU during early boot.
1199  *****************************************************************************/
1200 int psci_secondaries_brought_up(void)
1201 {
1202 	unsigned int idx, n_valid = 0U;
1203 
1204 	for (idx = 0U; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
1205 		if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR) {
1206 			n_valid++;
1207 		}
1208 	}
1209 
1210 	assert(n_valid > 0U);
1211 
1212 	return (n_valid > 1U) ? 1 : 0;
1213 }
1214 
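/*
 * Invoke the cpu driver's power down hook for the given power level. Power
 * levels deeper than the last implemented handler are clamped to it, and the
 * hook's return value is passed back to the caller.
 */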
1215 static u_register_t call_cpu_pwr_dwn(unsigned int power_level)
1216 {
1217 	struct cpu_ops *ops = get_cpu_data(cpu_ops_ptr);
1218 
1219 	/* Call the last available power down handler */
1220 	if (power_level > CPU_MAX_PWR_DWN_OPS - 1) {
1221 		power_level = CPU_MAX_PWR_DWN_OPS - 1;
1222 	}
1223 
1224 	assert(ops != NULL);
1225 	assert(ops->pwr_dwn_ops[power_level] != NULL);
1226 
1227 	return ops->pwr_dwn_ops[power_level]();
1228 }
1229 
1230 static void prepare_cpu_pwr_dwn(unsigned int power_level)
1231 {
1232 	/* ignore the return, all cpus should behave the same */
1233 	(void)call_cpu_pwr_dwn(power_level);
1234 }
1235 
1236 static void prepare_cpu_pwr_up(unsigned int power_level)
1237 {
1238 	/*
1239 	 * Call the pwr_dwn cpu hook again, indicating that an abandon happened.
1240 	 * The cpu driver is expected to clean up. We ask it to return
1241 	 * PABANDON_ACK to indicate that it has handled this. This is a
1242 	 * heuristic: the value has been chosen such that an unported CPU is
1243 	 * extremely unlikely to return this value.
1244 	 */
1245 	u_register_t ret = call_cpu_pwr_dwn(power_level);
1246 
1247 	/* unreachable on AArch32 so cast down to calm the compiler */
1248 	if (ret != (u_register_t) PABANDON_ACK) {
1249 		panic();
1250 	}
1251 }
1252 
1253 /*******************************************************************************
1254  * Initiate the power down sequence by calling the power down operations
1255  * registered for this CPU.
1256  ******************************************************************************/
1257 void psci_pwrdown_cpu_start(unsigned int power_level)
1258 {
1259 #if ENABLE_RUNTIME_INSTRUMENTATION
1260 
1261 	/*
1262 	 * Flush cache line so that even if CPU power down happens
1263 	 * the timestamp update is reflected in memory.
1264 	 */
1265 	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
1266 		RT_INSTR_ENTER_CFLUSH,
1267 		PMF_CACHE_MAINT);
1268 #endif
1269 
1270 #if !HW_ASSISTED_COHERENCY
1271 	/*
1272 	 * Disable data caching and handle the stack's cache maintenance.
1273 	 *
1274 	 * If the core can't automatically exit coherency, the cpu driver needs
1275 	 * to flush caches and exit coherency. We can't do this with data caches
1276 	 * enabled. The cpu driver will decide which caches to flush based on
1277 	 * the power level.
1278 	 *
1279 	 * If automatic coherency management is possible, we can keep data
1280 	 * caches on until the very end and let hardware do cache maintenance.
1281 	 */
1282 	psci_do_pwrdown_cache_maintenance();
1283 #endif
1284 
1285 	/* Initiate the power down sequence by calling into the cpu driver. */
1286 	prepare_cpu_pwr_dwn(power_level);
1287 
1288 #if ENABLE_RUNTIME_INSTRUMENTATION
1289 	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
1290 		RT_INSTR_EXIT_CFLUSH,
1291 		PMF_NO_CACHE_MAINT);
1292 #endif
1293 }
1294 
1295 /*******************************************************************************
1296  * Finish a terminal power down sequence, ending with a wfi. In case of a wakeup,
1297  * it will retry the sleep and panic if the wakeup persists.
1298  ******************************************************************************/
1299 void __dead2 psci_pwrdown_cpu_end_terminal(void)
1300 {
1301 #if ERRATA_SME_POWER_DOWN
1302 	/*
1303 	 * force SME off to not get power down rejected. Getting here is
1304 	 * terminal so we don't care if we lose context because of another
1305 	 * wakeup
1306 	 */
1307 	if (is_feat_sme_supported()) {
1308 		write_svcr(0);
1309 		isb();
1310 	}
1311 #endif /* ERRATA_SME_POWER_DOWN */
1312 
1313 	/* ensure write buffer empty */
1314 	dsbsy();
1315 
1316 	/*
1317 	 * Execute a wfi which, in most cases, will allow the power controller
1318 	 * to physically power down this cpu. Under some circumstances that may
1319 	 * be denied. Hopefully this is transient, retrying a few times should
1320 	 * be denied. Hopefully this is transient; retrying a few times should
1321 	 * power the cpu down.
1322 	for (int i = 0; i < 32; i++)
1323 		wfi();
1324 
1325 	/* Wake up wasn't transient. System is probably in a bad state. */
1326 	ERROR("Could not power off CPU.\n");
1327 	panic();
1328 }
1329 
1330 /*******************************************************************************
1331  * Finish a non-terminal power down sequence, ending with a wfi. In case of
1332  * a wakeup, it will unwind any CPU-specific actions and return.
1333  ******************************************************************************/
1334 
1335 void psci_pwrdown_cpu_end_wakeup(unsigned int power_level)
1336 {
1337 	/* ensure write buffer empty */
1338 	dsbsy();
1339 
1340 	/*
1341 	 * Turn the core off. Usually, this will be terminal. In some circumstances
1342 	 * the powerdown will be denied and we'll need to unwind.
1343 	 */
1344 	wfi();
1345 
1346 	/*
1347 	 * Waking up does not strictly require hardware-assisted coherency, but
1348 	 * every core that can wake up here happens to have it. A wakeup can
1349 	 * occur either because of errata or a powerdown abandon (pabandon).
1350 	 */
1351 #if !defined(__aarch64__) || !HW_ASSISTED_COHERENCY
1352 	ERROR("AArch32 systems shouldn't wake up.\n");
1353 	panic();
1354 #endif
1355 	/*
1356 	 * Begin unwinding. Everything can be shared with CPU_ON and co later,
1357 	 * except the CPU specific bit. Cores that have hardware-assisted
1358 	 * coherency should be able to handle this.
1359 	 */
1360 	prepare_cpu_pwr_up(power_level);
1361 }
1362 
1363 /*******************************************************************************
1364  * This function invokes the callback 'stop_func()' with the 'mpidr' of each
1365  * online PE. The caller can pass a suitable method to stop a remote core.
1366  *
1367  * 'wait_ms' is the timeout value in milliseconds for the other cores to
1368  * transition to the power down state. Passing '0' makes it non-blocking.
1369  *
1370  * The function returns 'PSCI_E_DENIED' if some cores failed to stop within the
1371  * given timeout.
1372  ******************************************************************************/
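/*
 * Illustrative usage sketch (plat_stop_core is a hypothetical platform hook,
 * not defined in this file):
 *
 *   if (psci_stop_other_cores(plat_my_core_pos(), 100U, plat_stop_core) !=
 *       PSCI_E_SUCCESS) {
 *       // some cores did not power down within the 100 ms timeout
 *   }
 */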
1373 int psci_stop_other_cores(unsigned int this_cpu_idx, unsigned int wait_ms,
1374 				   void (*stop_func)(u_register_t mpidr))
1375 {
1376 	/* Invoke stop_func for each core */
1377 	for (unsigned int idx = 0U; idx < psci_plat_core_count; idx++) {
1378 		/* skip current CPU */
1379 		if (idx == this_cpu_idx) {
1380 			continue;
1381 		}
1382 
1383 		/* Check if the CPU is ON */
1384 		if (psci_get_aff_info_state_by_idx(idx) == AFF_STATE_ON) {
1385 			(*stop_func)(psci_cpu_pd_nodes[idx].mpidr);
1386 		}
1387 	}
1388 
1389 	/* Need to wait for other cores to shut down */
1390 	if (wait_ms != 0U) {
1391 		for (uint32_t delay_ms = wait_ms; ((delay_ms != 0U) &&
1392 					(!psci_is_last_on_cpu(this_cpu_idx))); delay_ms--) {
1393 			mdelay(1U);
1394 		}
1395 
1396 		if (!psci_is_last_on_cpu(this_cpu_idx)) {
1397 			WARN("Failed to stop all cores!\n");
1398 			psci_print_power_domain_map();
1399 			return PSCI_E_DENIED;
1400 		}
1401 	}
1402 
1403 	return PSCI_E_SUCCESS;
1404 }
1405 
1406 /*******************************************************************************
1407  * This function verifies that all the other cores in the system have been
1408  * turned OFF and the current CPU is the last running CPU in the system.
1409  * Returns true if the current CPU is the last ON CPU or false otherwise.
1410  *
1411  * This API has following differences with psci_is_last_on_cpu
1412  *  1. PSCI states are locked
1413  ******************************************************************************/
1414 bool psci_is_last_on_cpu_safe(unsigned int this_core)
1415 {
1416 	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
1417 
1418 	psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);
1419 
1420 	psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1421 
1422 	if (!psci_is_last_on_cpu(this_core)) {
1423 		psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1424 		return false;
1425 	}
1426 
1427 	psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1428 
1429 	return true;
1430 }
1431 
1432 /*******************************************************************************
1433  * This function verifies that all cores in the system have been turned ON.
1434  * Returns true if all CPUs are ON, false otherwise.
1435  *
1436  * This API has following differences with psci_are_all_cpus_on
1437  *  1. PSCI states are locked
1438  ******************************************************************************/
1439 bool psci_are_all_cpus_on_safe(unsigned int this_core)
1440 {
1441 	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
1442 
1443 	psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);
1444 
1445 	psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1446 
1447 	if (!psci_are_all_cpus_on()) {
1448 		psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1449 		return false;
1450 	}
1451 
1452 	psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1453 
1454 	return true;
1455 }
1456 
1457 /*******************************************************************************
1458  * Safely counts the number of CPUs in the system that are currently in the ON
1459  * or ON_PENDING state.
1460  *
1461  * This function acquires and releases the necessary power domain locks to
1462  * ensure consistency of the CPU state information.
1463  *
1464  * @param this_core The index of the current core making the query.
1465  *
1466  * @return The number of CPUs currently in AFF_STATE_ON or AFF_STATE_ON_PENDING.
1467  ******************************************************************************/
1468 unsigned int psci_num_cpus_running_on_safe(unsigned int this_core)
1469 {
1470 	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
1471 	unsigned int no_of_cpus;
1472 
1473 	psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);
1474 
1475 	psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1476 
1477 	no_of_cpus = psci_num_cpus_running();
1478 
1479 	psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1480 
1481 	return no_of_cpus;
1482 }
1483