xref: /rk3399_ARM-atf/lib/psci/psci_common.c (revision d335bbb1e20d4a8f0a6a26b97ba2a710015bf727)
1 /*
2  * Copyright (c) 2013-2025, Arm Limited and Contributors. All rights reserved.
3  *
4  * SPDX-License-Identifier: BSD-3-Clause
5  */
6 
7 #include <assert.h>
8 #include <string.h>
9 
10 #include <arch.h>
11 #include <arch_features.h>
12 #include <arch_helpers.h>
13 #include <common/bl_common.h>
14 #include <common/debug.h>
15 #include <context.h>
16 #include <drivers/delay_timer.h>
17 #include <lib/cpus/cpu_ops.h>
18 #include <lib/el3_runtime/context_mgmt.h>
19 #include <lib/extensions/spe.h>
20 #include <lib/pmf/pmf.h>
21 #include <lib/runtime_instr.h>
22 #include <lib/utils.h>
23 #include <plat/common/platform.h>
24 
25 #include "psci_private.h"
26 
27 /*
28  * SPD power management operations, expected to be supplied by the registered
29  * SPD on successful SP initialization
30  */
31 const spd_pm_ops_t *psci_spd_pm;
32 
33 /*
34  * PSCI requested local power state map. This array is used to store the local
35  * power states requested by a CPU for power levels from level 1 to
36  * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
37  * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
38  * CPU are the same.
39  *
40  * During state coordination, the platform is passed an array containing the
41  * local states requested for a particular non cpu power domain by each cpu
42  * within the domain.
43  *
44  * TODO: Dense packing of the requested states will cause cache thrashing
45  * when multiple power domains write to it. If we allocate the requested
46  * states at each power level in a cache-line aligned per-domain memory,
47  * the cache thrashing can be avoided.
48  */
49 static plat_local_state_t
50 	psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
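
/*
 * Illustration of the indexing convention (derived from the accessors below):
 * the state requested for power level 'lvl' (1 <= lvl <= PLAT_MAX_PWR_LVL) by
 * CPU 'cpu_idx' is stored at psci_req_local_pwr_states[lvl - 1][cpu_idx].
 * For example, with PLAT_MAX_PWR_LVL == 2, row 0 holds the level 1 (cluster)
 * requests and row 1 holds the level 2 (system) requests of each core.
 */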
51 
52 unsigned int psci_plat_core_count;
53 
54 /*******************************************************************************
55  * Arrays that hold the platform's power domain tree information for state
56  * management of power domains.
57  * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
58  * which is an ancestor of a CPU power domain.
59  * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
60  ******************************************************************************/
61 non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
62 #if USE_COHERENT_MEM
63 __section(".tzfw_coherent_mem")
64 #endif
65 ;
66 
67 /* Lock for PSCI state coordination */
68 DEFINE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
69 
70 cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
71 
72 /*******************************************************************************
73  * Pointer to functions exported by the platform to complete power mgmt. ops
74  ******************************************************************************/
75 const plat_psci_ops_t *psci_plat_pm_ops;
76 
77 /******************************************************************************
78  * Check that the maximum power level supported by the platform makes sense
79  *****************************************************************************/
80 CASSERT((PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL) &&
81 	(PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL),
82 	assert_platform_max_pwrlvl_check);
83 
84 #if PSCI_OS_INIT_MODE
85 /*******************************************************************************
86  * The power state coordination mode used in CPU_SUSPEND.
87  * Defaults to platform-coordinated mode.
88  ******************************************************************************/
89 suspend_mode_t psci_suspend_mode = PLAT_COORD;
90 #endif
91 
92 /*
93  * The plat_local_state used by the platform is one of these types: RUN,
94  * RETENTION and OFF. The platform can define further sub-states for each type
95  * apart from RUN. This categorization is done to verify the sanity of the
96  * psci_power_state passed by the platform and to print debug information. The
97  * categorization is done on the basis of the following conditions:
98  *
99  * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
100  *
101  * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
102  *    STATE_TYPE_RETN.
103  *
104  * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
105  *    STATE_TYPE_OFF.
106  */
107 typedef enum plat_local_state_type {
108 	STATE_TYPE_RUN = 0,
109 	STATE_TYPE_RETN,
110 	STATE_TYPE_OFF
111 } plat_local_state_type_t;
112 
113 /* Function used to categorize plat_local_state. */
114 static plat_local_state_type_t find_local_state_type(plat_local_state_t state)
115 {
116 	if (state != 0U) {
117 		if (state > PLAT_MAX_RET_STATE) {
118 			return STATE_TYPE_OFF;
119 		} else {
120 			return STATE_TYPE_RETN;
121 		}
122 	} else {
123 		return STATE_TYPE_RUN;
124 	}
125 }
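
/*
 * Illustrative example (the state values are platform-specific; a common
 * convention, e.g. on the Arm FVP, is PLAT_MAX_RET_STATE == 1 and
 * PLAT_MAX_OFF_STATE == 2):
 *   find_local_state_type(0) == STATE_TYPE_RUN
 *   find_local_state_type(1) == STATE_TYPE_RETN
 *   find_local_state_type(2) == STATE_TYPE_OFF
 */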
126 
127 /******************************************************************************
128  * Check that the maximum retention level supported by the platform is less
129  * than the maximum off level.
130  *****************************************************************************/
131 CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE,
132 		assert_platform_max_off_and_retn_state_check);
133 
134 /******************************************************************************
135  * This function ensures that the power state parameter in a CPU_SUSPEND request
136  * is valid. If so, it returns the requested states for each power level.
137  *****************************************************************************/
138 int psci_validate_power_state(unsigned int power_state,
139 			      psci_power_state_t *state_info)
140 {
141 	/* Check SBZ bits in power state are zero */
142 	if (psci_check_power_state(power_state) != 0U) {
143 		return PSCI_E_INVALID_PARAMS;
144 	}
145 	assert(psci_plat_pm_ops->validate_power_state != NULL);
146 
147 	/* Validate the power_state using platform pm_ops */
148 	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
149 }
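
/*
 * Typical usage (illustrative sketch only): the CPU_SUSPEND path validates the
 * power_state parameter before any state coordination is attempted, e.g.
 *
 *   psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
 *   rc = psci_validate_power_state(power_state, &state_info);
 *   if (rc != PSCI_E_SUCCESS)
 *       return rc;
 *   // state_info now holds the per-level states requested by the caller
 *
 * See psci_cpu_suspend() for the actual caller-side flow.
 */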
150 
151 /******************************************************************************
152  * This function retrieves the `psci_power_state_t` for system suspend from
153  * the platform.
154  *****************************************************************************/
155 void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
156 {
157 	/*
158 	 * Assert that the required pm_ops hook is implemented to ensure that
159 	 * the capability detected during psci_setup() is valid.
160 	 */
161 	assert(psci_plat_pm_ops->get_sys_suspend_power_state != NULL);
162 
163 	/*
164 	 * Query the platform for the power_state required for system suspend
165 	 */
166 	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
167 }
168 
169 #if PSCI_OS_INIT_MODE
170 /*******************************************************************************
171  * This function verifies that all the other cores at the 'end_pwrlvl' have been
172  * idled and the current CPU is the last running CPU at the 'end_pwrlvl'.
173  * Returns true if the current CPU is the last running CPU at the 'end_pwrlvl',
174  * false otherwise.
175  ******************************************************************************/
176 static bool psci_is_last_cpu_to_idle_at_pwrlvl(unsigned int my_idx, unsigned int end_pwrlvl)
177 {
178 	unsigned int lvl;
179 	unsigned int parent_idx = 0;
180 	unsigned int cpu_start_idx, ncpus, cpu_idx;
181 	plat_local_state_t local_state;
182 
183 	if (end_pwrlvl == PSCI_CPU_PWR_LVL) {
184 		return true;
185 	}
186 
187 	parent_idx = psci_cpu_pd_nodes[my_idx].parent_node;
188 	for (lvl = PSCI_CPU_PWR_LVL + U(1); lvl < end_pwrlvl; lvl++) {
189 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
190 	}
191 
192 	cpu_start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
193 	ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
194 
195 	for (cpu_idx = cpu_start_idx; cpu_idx < cpu_start_idx + ncpus;
196 			cpu_idx++) {
197 		local_state = psci_get_cpu_local_state_by_idx(cpu_idx);
198 		if (cpu_idx == my_idx) {
199 			assert(is_local_state_run(local_state) != 0);
200 			continue;
201 		}
202 
203 		if (is_local_state_run(local_state) != 0) {
204 			return false;
205 		}
206 	}
207 
208 	return true;
209 }
210 #endif
211 
212 /*******************************************************************************
213  * This function verifies that all the other cores in the system have been
214  * turned OFF and the current CPU is the last running CPU in the system.
215  * Returns true if the current CPU is the last ON CPU, false otherwise.
216  ******************************************************************************/
217 bool psci_is_last_on_cpu(unsigned int my_idx)
218 {
219 	for (unsigned int cpu_idx = 0U; cpu_idx < psci_plat_core_count; cpu_idx++) {
220 		if (cpu_idx == my_idx) {
221 			assert(psci_get_aff_info_state() == AFF_STATE_ON);
222 			continue;
223 		}
224 
225 		if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF) {
226 			VERBOSE("core=%u other than current core=%u %s\n",
227 				cpu_idx, my_idx, "running in the system");
228 			return false;
229 		}
230 	}
231 
232 	return true;
233 }
234 
235 /*******************************************************************************
236  * This function verifies that all cores in the system have been turned ON.
237  * Returns true if all CPUs are ON, false otherwise.
238  ******************************************************************************/
239 static bool psci_are_all_cpus_on(void)
240 {
241 	unsigned int cpu_idx;
242 
243 	for (cpu_idx = 0U; cpu_idx < psci_plat_core_count; cpu_idx++) {
244 		if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_OFF) {
245 			return false;
246 		}
247 	}
248 
249 	return true;
250 }
251 
252 /*******************************************************************************
253  * Counts the number of CPUs in the system that are currently in the ON or
254  * ON_PENDING state.
255  *
256  * @note This function does not acquire any power domain locks. It must only be
257  *       called in contexts where it is guaranteed that PSCI state transitions
258  *       are not concurrently happening, or where locks are already held.
259  *
260  * @return The number of CPUs currently in AFF_STATE_ON or AFF_STATE_ON_PENDING.
261  ******************************************************************************/
262 static unsigned int psci_num_cpus_running(void)
263 {
264 	unsigned int cpu_idx;
265 	unsigned int no_of_cpus = 0U;
266 	aff_info_state_t aff_state;
267 
268 	for (cpu_idx = 0U; cpu_idx < psci_plat_core_count; cpu_idx++) {
269 		aff_state = psci_get_aff_info_state_by_idx(cpu_idx);
270 		if (aff_state == AFF_STATE_ON ||
271 		    aff_state == AFF_STATE_ON_PENDING) {
272 			no_of_cpus++;
273 		}
274 	}
275 
276 	return no_of_cpus;
277 }
278 
279 /*******************************************************************************
280  * Routine to return the maximum power level to traverse to after a cpu has
281  * been physically powered up. It is expected to be called immediately after
282  * reset from assembler code.
283  ******************************************************************************/
284 static unsigned int get_power_on_target_pwrlvl(void)
285 {
286 	unsigned int pwrlvl;
287 
288 	/*
289 	 * Assume that this cpu was suspended and retrieve its target power
290 	 * level. If it wasn't, the cpu is off so this will be PLAT_MAX_PWR_LVL.
291 	 */
292 	pwrlvl = psci_get_suspend_pwrlvl();
293 	assert(pwrlvl < PSCI_INVALID_PWR_LVL);
294 	return pwrlvl;
295 }
296 
297 /******************************************************************************
298  * Helper function to update the requested local power state array. This array
299  * does not store the requested state for the CPU power level. Hence an
300  * assertion is added to prevent us from accessing the CPU power level.
301  *****************************************************************************/
302 static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
303 					 unsigned int cpu_idx,
304 					 plat_local_state_t req_pwr_state)
305 {
306 	assert(pwrlvl > PSCI_CPU_PWR_LVL);
307 	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
308 			(cpu_idx < psci_plat_core_count)) {
309 		psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx] = req_pwr_state;
310 	}
311 }
312 
313 /******************************************************************************
314  * This function initializes the psci_req_local_pwr_states.
315  *****************************************************************************/
316 void __init psci_init_req_local_pwr_states(void)
317 {
318 	/* Initialize the requested state of all non CPU power domains as OFF */
319 	unsigned int pwrlvl;
320 	unsigned int core;
321 
322 	for (pwrlvl = 0U; pwrlvl < PLAT_MAX_PWR_LVL; pwrlvl++) {
323 		for (core = 0; core < psci_plat_core_count; core++) {
324 			psci_req_local_pwr_states[pwrlvl][core] =
325 				PLAT_MAX_OFF_STATE;
326 		}
327 	}
328 }
329 
330 /******************************************************************************
331  * Helper function to return a reference to an array containing the local power
332  * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
333  * array will be the number of cpu power domains of which this power domain is
334  * an ancestor. These requested states will be used to determine a suitable
335  * target state for this power domain during psci state coordination. An
336  * assertion is added to prevent us from accessing the CPU power level.
337  *****************************************************************************/
338 static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
339 							 unsigned int cpu_idx)
340 {
341 	assert(pwrlvl > PSCI_CPU_PWR_LVL);
342 
343 	if ((pwrlvl > PSCI_CPU_PWR_LVL) && (pwrlvl <= PLAT_MAX_PWR_LVL) &&
344 			(cpu_idx < psci_plat_core_count)) {
345 		return &psci_req_local_pwr_states[pwrlvl - 1U][cpu_idx];
346 	} else
347 		return NULL;
348 }
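
/*
 * For example, a call with pwrlvl = 1 and cpu_idx equal to the cpu_start_idx of
 * a cluster node returns a pointer into row 0 of psci_req_local_pwr_states. The
 * coordination code then reads 'ncpus' consecutive entries from that pointer,
 * one per CPU in the cluster (see psci_do_state_coordination() below).
 */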
349 
350 #if PSCI_OS_INIT_MODE
351 /******************************************************************************
352  * Helper function to save a copy of the psci_req_local_pwr_states (prev) for a
353  * CPU (cpu_idx), and update psci_req_local_pwr_states with the new requested
354  * local power states (state_info).
355  *****************************************************************************/
356 void psci_update_req_local_pwr_states(unsigned int end_pwrlvl,
357 				      unsigned int cpu_idx,
358 				      psci_power_state_t *state_info,
359 				      plat_local_state_t *prev)
360 {
361 	unsigned int lvl;
362 #ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL
363 	unsigned int max_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL;
364 #else
365 	unsigned int max_pwrlvl = PLAT_MAX_PWR_LVL;
366 #endif
367 	plat_local_state_t req_state;
368 
369 	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= max_pwrlvl; lvl++) {
370 		/* Save the previous requested local power state */
371 		prev[lvl - 1U] = *psci_get_req_local_pwr_states(lvl, cpu_idx);
372 
373 		/* Update the new requested local power state */
374 		if (lvl <= end_pwrlvl) {
375 			req_state = state_info->pwr_domain_state[lvl];
376 		} else {
377 			req_state = state_info->pwr_domain_state[end_pwrlvl];
378 		}
379 		psci_set_req_local_pwr_state(lvl, cpu_idx, req_state);
380 	}
381 }
382 
383 /******************************************************************************
384  * Helper function to restore the previously saved requested local power states
385  * (prev) for a CPU (cpu_idx) to psci_req_local_pwr_states.
386  *****************************************************************************/
387 void psci_restore_req_local_pwr_states(unsigned int cpu_idx,
388 				       plat_local_state_t *prev)
389 {
390 	unsigned int lvl;
391 #ifdef PLAT_MAX_CPU_SUSPEND_PWR_LVL
392 	unsigned int max_pwrlvl = PLAT_MAX_CPU_SUSPEND_PWR_LVL;
393 #else
394 	unsigned int max_pwrlvl = PLAT_MAX_PWR_LVL;
395 #endif
396 
397 	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= max_pwrlvl; lvl++) {
398 		/* Restore the previous requested local power state */
399 		psci_set_req_local_pwr_state(lvl, cpu_idx, prev[lvl - 1U]);
400 	}
401 }
402 #endif
403 
404 /*
405  * psci_non_cpu_pd_nodes can be placed either in normal memory or coherent
406  * memory.
407  *
408  * With !USE_COHERENT_MEM, psci_non_cpu_pd_nodes is placed in normal memory,
409  * where it is accessed by both cached and non-cached participants. To serve the
410  * lowest common denominator, perform a cache flush before reads and after writes
411  * so that non-cached participants operate on the latest data in main memory.
412  *
413  * When USE_COHERENT_MEM is used, psci_non_cpu_pd_nodes is placed in coherent
414  * memory. With HW_ASSISTED_COHERENCY, all PSCI participants are cache-coherent.
415  * In both cases, no cache operations are required.
416  */
417 
418 /*
419  * Retrieve local state of non-CPU power domain node from a non-cached CPU,
420  * after any required cache maintenance operation.
421  */
422 static plat_local_state_t get_non_cpu_pd_node_local_state(
423 		unsigned int parent_idx)
424 {
425 #if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
426 	flush_dcache_range(
427 			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
428 			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
429 #endif
430 	return psci_non_cpu_pd_nodes[parent_idx].local_state;
431 }
432 
433 /*
434  * Update local state of non-CPU power domain node from a cached CPU; perform
435  * any required cache maintenance operation afterwards.
436  */
437 static void set_non_cpu_pd_node_local_state(unsigned int parent_idx,
438 		plat_local_state_t state)
439 {
440 	psci_non_cpu_pd_nodes[parent_idx].local_state = state;
441 #if !(USE_COHERENT_MEM || HW_ASSISTED_COHERENCY || WARMBOOT_ENABLE_DCACHE_EARLY)
442 	flush_dcache_range(
443 			(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
444 			sizeof(psci_non_cpu_pd_nodes[parent_idx]));
445 #endif
446 }
447 
448 /******************************************************************************
449  * Helper function to return the current local power state of each power domain
450  * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
451  * function will be called after a cpu is powered on to find the local state
452  * each power domain has emerged from.
453  *****************************************************************************/
454 void psci_get_target_local_pwr_states(unsigned int cpu_idx, unsigned int end_pwrlvl,
455 				      psci_power_state_t *target_state)
456 {
457 	unsigned int parent_idx, lvl;
458 	plat_local_state_t *pd_state = target_state->pwr_domain_state;
459 
460 	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
461 	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
462 
463 	/* Copy the local power state from node to state_info */
464 	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
465 		pd_state[lvl] = get_non_cpu_pd_node_local_state(parent_idx);
466 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
467 	}
468 
469 	/* Set the higher levels to RUN */
470 	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++) {
471 		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
472 	}
473 }
474 
475 /******************************************************************************
476  * Helper function to set the target local power state that each power domain
477  * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
478  * enter. This function will be called after coordination of requested power
479  * states has been done for each power level.
480  *****************************************************************************/
481 void psci_set_target_local_pwr_states(unsigned int cpu_idx, unsigned int end_pwrlvl,
482 				      const psci_power_state_t *target_state)
483 {
484 	unsigned int parent_idx, lvl;
485 	const plat_local_state_t *pd_state = target_state->pwr_domain_state;
486 
487 	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);
488 
489 	/*
490 	 * Need to flush as local_state might be accessed with Data Cache
491 	 * disabled during power on
492 	 */
493 	psci_flush_cpu_data(psci_svc_cpu_data.local_state);
494 
495 	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
496 
497 	/* Copy the local_state from state_info */
498 	for (lvl = 1U; lvl <= end_pwrlvl; lvl++) {
499 		set_non_cpu_pd_node_local_state(parent_idx, pd_state[lvl]);
500 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
501 	}
502 }
503 
504 /*******************************************************************************
505  * PSCI helper function to get the parent nodes corresponding to a cpu_index.
506  ******************************************************************************/
507 void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
508 				      unsigned int end_lvl,
509 				      unsigned int *node_index)
510 {
511 	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
512 	unsigned int i;
513 	unsigned int *node = node_index;
514 
515 	for (i = PSCI_CPU_PWR_LVL + 1U; i <= end_lvl; i++) {
516 		*node = parent_node;
517 		node++;
518 		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
519 	}
520 }
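
/*
 * On return, node_index[0] holds the CPU's level 1 (e.g. cluster) parent node,
 * node_index[1] its level 2 parent, and so on up to node_index[end_lvl - 1].
 * These indices are later used to acquire and release the corresponding power
 * domain locks in psci_acquire_pwr_domain_locks() and
 * psci_release_pwr_domain_locks().
 */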
521 
522 /******************************************************************************
523  * This function is invoked post CPU power up and initialization. It sets the
524  * affinity info state, target power state and requested power state for the
525  * current CPU and all its ancestor power domains to RUN.
526  *****************************************************************************/
527 void psci_set_pwr_domains_to_run(unsigned int cpu_idx, unsigned int end_pwrlvl)
528 {
529 	unsigned int parent_idx, lvl;
530 	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
531 
532 	/* Reset the local_state to RUN for the non cpu power domains. */
533 	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
534 		set_non_cpu_pd_node_local_state(parent_idx,
535 				PSCI_LOCAL_STATE_RUN);
536 		psci_set_req_local_pwr_state(lvl,
537 					     cpu_idx,
538 					     PSCI_LOCAL_STATE_RUN);
539 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
540 	}
541 
542 	/* Set the affinity info state to ON */
543 	psci_set_aff_info_state(AFF_STATE_ON);
544 
545 	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
546 	psci_flush_cpu_data(psci_svc_cpu_data);
547 }
548 
549 /******************************************************************************
550  * This function is used in platform-coordinated mode.
551  *
552  * This function is passed the local power states requested for each power
553  * domain (state_info) between the current CPU domain and its ancestors until
554  * the target power level (end_pwrlvl). It updates the array of requested power
555  * states with this information.
556  *
557  * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
558  * retrieves the states requested by all the cpus of which the power domain at
559  * that level is an ancestor. It passes this information to the platform to
560  * coordinate and return the target power state. If the target state for a level
561  * is RUN then subsequent levels are not considered. At the CPU level, state
562  * coordination is not required. Hence, the requested and the target states are
563  * the same.
564  *
565  * The 'state_info' is updated with the target state for each level between the
566  * CPU and the 'end_pwrlvl' and returned to the caller.
567  *
568  * This function will only be invoked with data cache enabled and while
569  * powering down a core.
570  *****************************************************************************/
571 void psci_do_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
572 				psci_power_state_t *state_info)
573 {
574 	unsigned int lvl, parent_idx;
575 	unsigned int start_idx;
576 	unsigned int ncpus;
577 	plat_local_state_t target_state;
578 
579 	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
580 	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
581 
582 	/* For level 0, the requested state will be equivalent
583 	   to target state */
584 	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
585 
586 		/* First update the requested power state */
587 		psci_set_req_local_pwr_state(lvl, cpu_idx,
588 					     state_info->pwr_domain_state[lvl]);
589 
590 		/* Get the requested power states for this power level */
591 		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
592 		plat_local_state_t const *req_states = psci_get_req_local_pwr_states(lvl,
593 										start_idx);
594 
595 		/*
596 		 * Let the platform coordinate amongst the requested states at
597 		 * this power level and return the target local power state.
598 		 */
599 		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
600 		target_state = plat_get_target_pwr_state(lvl,
601 							 req_states,
602 							 ncpus);
603 
604 		state_info->pwr_domain_state[lvl] = target_state;
605 
606 		/* Break early if the negotiated target power state is RUN */
607 		if (is_local_state_run(state_info->pwr_domain_state[lvl]) != 0) {
608 			break;
609 		}
610 
611 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
612 	}
613 
614 	/*
615 	 * This is for cases when we break out of the above loop early because
616 	 * the target power state is RUN at a power level < end_pwlvl.
617 	 * We update the requested power state from state_info and then
618 	 * set the target state as RUN.
619 	 */
620 	for (lvl = lvl + 1U; lvl <= end_pwrlvl; lvl++) {
621 		psci_set_req_local_pwr_state(lvl, cpu_idx,
622 					     state_info->pwr_domain_state[lvl]);
623 		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
624 
625 	}
626 }
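
/*
 * Worked example (illustrative topology and state values: two CPUs sharing one
 * cluster at level 1, PLAT_MAX_RET_STATE == 1, PLAT_MAX_OFF_STATE == 2):
 *  - CPU0 requests OFF (2) for itself and for the cluster while CPU1 is still
 *    running, so the level 1 requested-states array holds {2, 0}.
 *  - plat_get_target_pwr_state() typically coordinates this to RUN (0) for the
 *    cluster, since one of its CPUs is active, and state_info becomes
 *    {lvl0 = OFF, lvl1 = RUN}: only the CPU power domain is turned off.
 *  - Once CPU1 also requests OFF, the array holds {2, 2} and the cluster can be
 *    coordinated to OFF as well.
 */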
627 
628 #if PSCI_OS_INIT_MODE
629 /******************************************************************************
630  * This function is used in OS-initiated mode.
631  *
632  * This function is passed the local power states requested for each power
633  * domain (state_info) between the current CPU domain and its ancestors until
634  * the target power level (end_pwrlvl), and ensures the requested power states
635  * are valid. It updates the array of requested power states with this
636  * information.
637  *
638  * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
639  * retrieves the states requested by all the cpus of which the power domain at
640  * that level is an ancestor. It passes this information to the platform to
641  * coordinate and return the target power state. If the requested state does
642  * not match the target state, the request is denied.
643  *
644  * The 'state_info' is not modified.
645  *
646  * This function will only be invoked with data cache enabled and while
647  * powering down a core.
648  *****************************************************************************/
649 int psci_validate_state_coordination(unsigned int cpu_idx, unsigned int end_pwrlvl,
650 				     psci_power_state_t *state_info)
651 {
652 	int rc = PSCI_E_SUCCESS;
653 	unsigned int lvl, parent_idx;
654 	unsigned int start_idx;
655 	unsigned int ncpus;
656 	plat_local_state_t target_state, *req_states;
657 	plat_local_state_t prev[PLAT_MAX_PWR_LVL];
658 
659 	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
660 	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
661 
662 	/*
663 	 * Save a copy of the previous requested local power states and update
664 	 * the new requested local power states.
665 	 */
666 	psci_update_req_local_pwr_states(end_pwrlvl, cpu_idx, state_info, prev);
667 
668 	for (lvl = PSCI_CPU_PWR_LVL + 1U; lvl <= end_pwrlvl; lvl++) {
669 		/* Get the requested power states for this power level */
670 		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
671 		req_states = psci_get_req_local_pwr_states(lvl, start_idx);
672 
673 		/*
674 		 * Let the platform coordinate amongst the requested states at
675 		 * this power level and return the target local power state.
676 		 */
677 		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
678 		target_state = plat_get_target_pwr_state(lvl,
679 							 req_states,
680 							 ncpus);
681 
682 		/*
683 		 * Verify that the requested power state matches the target
684 		 * local power state.
685 		 */
686 		if (state_info->pwr_domain_state[lvl] != target_state) {
687 			if (target_state == PSCI_LOCAL_STATE_RUN) {
688 				rc = PSCI_E_DENIED;
689 			} else {
690 				rc = PSCI_E_INVALID_PARAMS;
691 			}
692 			goto exit;
693 		}
694 
695 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
696 	}
697 
698 	/*
699 	 * Verify that the current core is the last running core at the
700 	 * specified power level.
701 	 */
702 	lvl = state_info->last_at_pwrlvl;
703 	if (!psci_is_last_cpu_to_idle_at_pwrlvl(cpu_idx, lvl)) {
704 		rc = PSCI_E_DENIED;
705 	}
706 
707 exit:
708 	if (rc != PSCI_E_SUCCESS) {
709 		/* Restore the previous requested local power states. */
710 		psci_restore_req_local_pwr_states(cpu_idx, prev);
711 		return rc;
712 	}
713 
714 	return rc;
715 }
716 #endif
717 
718 /******************************************************************************
719  * This function validates a suspend request by making sure that if a standby
720  * state is requested then no power level is turned off and the highest power
721  * level is placed in a standby/retention state.
722  *
723  * It also ensures that the state that level X will enter is not shallower than
724  * the state that level X + 1 will enter.
725  *
726  * This validation will be enabled only for DEBUG builds as the platform is
727  * expected to perform these validations as well.
728  *****************************************************************************/
729 int psci_validate_suspend_req(const psci_power_state_t *state_info,
730 			      unsigned int is_power_down_state)
731 {
732 	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
733 	plat_local_state_t state;
734 	plat_local_state_type_t req_state_type, deepest_state_type;
735 	int i;
736 
737 	/* Find the target suspend power level */
738 	target_lvl = psci_find_target_suspend_lvl(state_info);
739 	if (target_lvl == PSCI_INVALID_PWR_LVL)
740 		return PSCI_E_INVALID_PARAMS;
741 
742 	/* All power domain levels are in a RUN state to begin with */
743 	deepest_state_type = STATE_TYPE_RUN;
744 
745 	for (i = (int) target_lvl; i >= (int) PSCI_CPU_PWR_LVL; i--) {
746 		state = state_info->pwr_domain_state[i];
747 		req_state_type = find_local_state_type(state);
748 
749 		/*
750 		 * While traversing from the highest power level to the lowest,
751 		 * the state requested for lower levels has to be the same or
752 		 * deeper i.e. equal to or greater than the state at the higher
753 		 * levels. If this condition is true, then the requested state
754 		 * becomes the deepest state encountered so far.
755 		 */
756 		if (req_state_type < deepest_state_type)
757 			return PSCI_E_INVALID_PARAMS;
758 		deepest_state_type = req_state_type;
759 	}
760 
761 	/* Find the highest off power level */
762 	max_off_lvl = psci_find_max_off_lvl(state_info);
763 
764 	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
765 	max_retn_lvl = PSCI_INVALID_PWR_LVL;
766 	if (target_lvl != max_off_lvl)
767 		max_retn_lvl = target_lvl;
768 
769 	/*
770 	 * If this is not a request for a power down state then max off level
771 	 * has to be invalid and max retention level has to be a valid power
772 	 * level.
773 	 */
774 	if ((is_power_down_state == 0U) &&
775 			((max_off_lvl != PSCI_INVALID_PWR_LVL) ||
776 			 (max_retn_lvl == PSCI_INVALID_PWR_LVL)))
777 		return PSCI_E_INVALID_PARAMS;
778 
779 	return PSCI_E_SUCCESS;
780 }
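
/*
 * Example (illustrative state values: PLAT_MAX_RET_STATE == 1,
 * PLAT_MAX_OFF_STATE == 2):
 *  - { lvl0 = OFF, lvl1 = RET } is accepted: walking down from the target
 *    level, each lower level is at least as deep as the level above it.
 *  - { lvl0 = RET, lvl1 = OFF } is rejected with PSCI_E_INVALID_PARAMS, as
 *    level 0 would be shallower than level 1.
 *  - A standby request (is_power_down_state == 0) is rejected if any level is
 *    OFF, since max_off_lvl must remain invalid for such a request.
 */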
781 
782 /******************************************************************************
783  * This function finds the highest power level which will be powered down
784  * amongst all the power levels specified in the 'state_info' structure
785  *****************************************************************************/
786 unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
787 {
788 	int i;
789 
790 	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
791 		if (is_local_state_off(state_info->pwr_domain_state[i]) != 0) {
792 			return (unsigned int) i;
793 		}
794 	}
795 
796 	return PSCI_INVALID_PWR_LVL;
797 }
798 
799 /******************************************************************************
800  * This function finds the level of the highest power domain which will be
801  * placed in a low power state during a suspend operation.
802  *****************************************************************************/
803 unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
804 {
805 	int i;
806 
807 	for (i = (int) PLAT_MAX_PWR_LVL; i >= (int) PSCI_CPU_PWR_LVL; i--) {
808 		if (is_local_state_run(state_info->pwr_domain_state[i]) == 0)
809 			return (unsigned int) i;
810 	}
811 
812 	return PSCI_INVALID_PWR_LVL;
813 }
814 
815 /*******************************************************************************
816  * This function is passed the highest level in the topology tree that the
817  * operation should be applied to and a list of node indexes. It picks up locks
818  * from the node index list in order of increasing power domain level in the
819  * range specified.
820  ******************************************************************************/
821 void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
822 				   const unsigned int *parent_nodes)
823 {
824 	unsigned int parent_idx;
825 	unsigned int level;
826 
827 	/* No locking required for level 0. Hence start locking from level 1 */
828 	for (level = PSCI_CPU_PWR_LVL + 1U; level <= end_pwrlvl; level++) {
829 		parent_idx = parent_nodes[level - 1U];
830 		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
831 	}
832 }
833 
834 /*******************************************************************************
835  * This function is passed the highest level in the topology tree that the
836  * operation should be applied to and a list of node indexes. It releases the
837  * locks in order of decreasing power domain level in the range specified.
838  ******************************************************************************/
839 void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
840 				   const unsigned int *parent_nodes)
841 {
842 	unsigned int parent_idx;
843 	unsigned int level;
844 
845 	/* Unlock top down. No unlocking required for level 0. */
846 	for (level = end_pwrlvl; level >= (PSCI_CPU_PWR_LVL + 1U); level--) {
847 		parent_idx = parent_nodes[level - 1U];
848 		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
849 	}
850 }
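
/*
 * Example of the locking discipline with end_pwrlvl == 2: parent_nodes[0] is
 * the CPU's level 1 (e.g. cluster) node and parent_nodes[1] its level 2 (e.g.
 * system) node. psci_acquire_pwr_domain_locks() locks the cluster then the
 * system node; psci_release_pwr_domain_locks() releases them in the reverse
 * order. The consistent bottom-up acquisition order across CPUs prevents
 * deadlock.
 */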
851 
852 /*******************************************************************************
853  * This function determines the full entrypoint information for the requested
854  * PSCI entrypoint on power on/resume and returns it.
855  ******************************************************************************/
856 #ifdef __aarch64__
857 static int psci_get_ns_ep_info(entry_point_info_t *ep,
858 			       uintptr_t entrypoint,
859 			       u_register_t context_id)
860 {
861 	u_register_t ep_attr, sctlr;
862 	unsigned int daif, ee, mode;
863 	u_register_t ns_scr_el3 = read_scr_el3();
864 	u_register_t ns_sctlr_el1 = read_sctlr_el1();
865 
866 	sctlr = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
867 		read_sctlr_el2() : ns_sctlr_el1;
868 	ee = 0;
869 
870 	ep_attr = NON_SECURE | EP_ST_DISABLE;
871 	if ((sctlr & SCTLR_EE_BIT) != 0U) {
872 		ep_attr |= EP_EE_BIG;
873 		ee = 1;
874 	}
875 	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
876 
877 	ep->pc = entrypoint;
878 	zeromem(&ep->args, sizeof(ep->args));
879 	ep->args.arg0 = context_id;
880 
881 	/*
882 	 * Figure out whether the cpu enters the non-secure address space
883 	 * in aarch32 or aarch64
884 	 */
885 	if ((ns_scr_el3 & SCR_RW_BIT) != 0U) {
886 
887 		/*
888 		 * Check whether a Thumb entry point has been provided for an
889 		 * aarch64 EL
890 		 */
891 		if ((entrypoint & 0x1UL) != 0UL)
892 			return PSCI_E_INVALID_ADDRESS;
893 
894 		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ? MODE_EL2 : MODE_EL1;
895 
896 		ep->spsr = SPSR_64((uint64_t)mode, MODE_SP_ELX,
897 				   DISABLE_ALL_EXCEPTIONS);
898 	} else {
899 
900 		mode = ((ns_scr_el3 & SCR_HCE_BIT) != 0U) ?
901 			MODE32_hyp : MODE32_svc;
902 
903 		/*
904 		 * TODO: Choose async. exception bits if HYP mode is not
905 		 * implemented according to the values of SCR.{AW, FW} bits
906 		 */
907 		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
908 
909 		ep->spsr = SPSR_MODE32((uint64_t)mode, entrypoint & 0x1, ee,
910 				       daif);
911 	}
912 
913 	return PSCI_E_SUCCESS;
914 }
915 #else /* !__aarch64__ */
916 static int psci_get_ns_ep_info(entry_point_info_t *ep,
917 			       uintptr_t entrypoint,
918 			       u_register_t context_id)
919 {
920 	u_register_t ep_attr;
921 	unsigned int aif, ee, mode;
922 	u_register_t scr = read_scr();
923 	u_register_t ns_sctlr, sctlr;
924 
925 	/* Switch to non secure state */
926 	write_scr(scr | SCR_NS_BIT);
927 	isb();
928 	ns_sctlr = read_sctlr();
929 
930 	sctlr = scr & SCR_HCE_BIT ? read_hsctlr() : ns_sctlr;
931 
932 	/* Return to original state */
933 	write_scr(scr);
934 	isb();
935 	ee = 0;
936 
937 	ep_attr = NON_SECURE | EP_ST_DISABLE;
938 	if (sctlr & SCTLR_EE_BIT) {
939 		ep_attr |= EP_EE_BIG;
940 		ee = 1;
941 	}
942 	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
943 
944 	ep->pc = entrypoint;
945 	zeromem(&ep->args, sizeof(ep->args));
946 	ep->args.arg0 = context_id;
947 
948 	mode = scr & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
949 
950 	/*
951 	 * TODO: Choose async. exception bits if HYP mode is not
952 	 * implemented according to the values of SCR.{AW, FW} bits
953 	 */
954 	aif = SPSR_ABT_BIT | SPSR_IRQ_BIT | SPSR_FIQ_BIT;
955 
956 	ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, aif);
957 
958 	return PSCI_E_SUCCESS;
959 }
960 
961 #endif /* __aarch64__ */
962 
963 /*******************************************************************************
964  * This function validates the entrypoint with the platform layer if the
965  * appropriate pm_ops hook is exported by the platform and returns the
966  * 'entry_point_info'.
967  ******************************************************************************/
968 int psci_validate_entry_point(entry_point_info_t *ep,
969 			      uintptr_t entrypoint,
970 			      u_register_t context_id)
971 {
972 	int rc;
973 
974 	/* Validate the entrypoint using platform psci_ops */
975 	if (psci_plat_pm_ops->validate_ns_entrypoint != NULL) {
976 		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
977 		if (rc != PSCI_E_SUCCESS) {
978 			return PSCI_E_INVALID_ADDRESS;
979 		}
980 	}
981 
982 	/*
983 	 * Verify and derive the re-entry information for
984 	 * the non-secure world from the non-secure state from
985 	 * where this call originated.
986 	 */
987 	rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
988 	return rc;
989 }
990 
991 /*******************************************************************************
992  * Generic handler which is called when a cpu is physically powered on. It
993  * traverses the node information, finds the highest power level that was
994  * powered off and performs the generic, architectural and platform setup and
995  * state management needed to power on that power level and the levels below it.
996  * e.g. for a cpu that has been powered on, it will call the platform-specific
997  * code to enable the GIC cpu interface; for a cluster, it will additionally
998  * enable coherency at the interconnect level.
999  ******************************************************************************/
1000 void psci_warmboot_entrypoint(void)
1001 {
1002 	unsigned int end_pwrlvl;
1003 	unsigned int cpu_idx = plat_my_core_pos();
1004 	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
1005 	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
1006 
1007 #if FEATURE_DETECTION
1008 	/* Detect if features enabled during compilation are supported by PE. */
1009 	detect_arch_features(cpu_idx);
1010 #endif /* FEATURE_DETECTION */
1011 
1012 	/* Init registers that never change for the lifetime of TF-A */
1013 	cm_manage_extensions_el3(cpu_idx);
1014 
1015 	/*
1016 	 * Verify that we have been explicitly turned ON or resumed from
1017 	 * suspend.
1018 	 */
1019 	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
1020 		ERROR("Unexpected affinity info state.\n");
1021 		panic();
1022 	}
1023 
1024 	/*
1025 	 * Get the maximum power domain level to traverse to after this cpu
1026 	 * has been physically powered up.
1027 	 */
1028 	end_pwrlvl = get_power_on_target_pwrlvl();
1029 
1030 	/* Get the parent nodes */
1031 	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
1032 
1033 	/*
1034 	 * This function acquires the lock corresponding to each power level so
1035 	 * that by the time all locks are taken, the system topology is snapshot
1036 	 * and state management can be done safely.
1037 	 */
1038 	psci_acquire_pwr_domain_locks(end_pwrlvl, parent_nodes);
1039 
1040 	psci_get_target_local_pwr_states(cpu_idx, end_pwrlvl, &state_info);
1041 
1042 #if ENABLE_PSCI_STAT
1043 	plat_psci_stat_accounting_stop(&state_info);
1044 #endif
1045 
1046 	/*
1047 	 * This CPU could be resuming from suspend or it could have just been
1048 	 * turned on. To distinguish between these 2 cases, we examine the
1049 	 * affinity state of the CPU:
1050 	 *  - If the affinity state is ON_PENDING then it has just been
1051 	 *    turned on.
1052 	 *  - Else it is resuming from suspend.
1053 	 *
1054 	 * Depending on the type of warm reset identified, choose the right set
1055 	 * of power management handler and perform the generic, architecture
1056 	 * and platform specific handling.
1057 	 */
1058 	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING) {
1059 		psci_cpu_on_finish(cpu_idx, &state_info);
1060 	} else {
1061 		unsigned int max_off_lvl = psci_find_max_off_lvl(&state_info);
1062 
1063 		assert(max_off_lvl != PSCI_INVALID_PWR_LVL);
1064 		psci_cpu_suspend_to_powerdown_finish(cpu_idx, max_off_lvl, &state_info, false);
1065 	}
1066 
1067 	/*
1068 	 * Caches and (importantly) coherency are on so we can rely on seeing
1069 	 * whatever the primary gave us without explicit cache maintenance
1070 	 */
1071 	entry_point_info_t *ep = get_cpu_data(warmboot_ep_info);
1072 	cm_init_my_context(ep);
1073 
1074 	/*
1075 	 * Generic management: Now we just need to retrieve the
1076 	 * information that we had stashed away during the cpu_on
1077 	 * call to set this cpu on its way.
1078 	 */
1079 	cm_prepare_el3_exit_ns();
1080 
1081 	/*
1082 	 * Set the requested and target state of this CPU and all the higher
1083 	 * power domains which are ancestors of this CPU to run.
1084 	 */
1085 	psci_set_pwr_domains_to_run(cpu_idx, end_pwrlvl);
1086 
1087 #if ENABLE_PSCI_STAT
1088 	psci_stats_update_pwr_up(cpu_idx, end_pwrlvl, &state_info);
1089 #endif
1090 
1091 	/*
1092 	 * This loop releases the lock corresponding to each power level
1093 	 * in the reverse order to which they were acquired.
1094 	 */
1095 	psci_release_pwr_domain_locks(end_pwrlvl, parent_nodes);
1096 }
1097 
1098 /*******************************************************************************
1099  * This function initializes the set of hooks that PSCI invokes as part of power
1100  * management operation. The power management hooks are expected to be provided
1101  * by the SPD, after it finishes all its initialization
1102  ******************************************************************************/
1103 void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
1104 {
1105 	assert(pm != NULL);
1106 	psci_spd_pm = pm;
1107 
1108 	if (pm->svc_migrate != NULL)
1109 		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
1110 
1111 	if (pm->svc_migrate_info != NULL)
1112 		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
1113 				| define_psci_cap(PSCI_MIG_INFO_TYPE);
1114 }
1115 
1116 /*******************************************************************************
1117  * This function invokes the migrate info hook in the spd_pm_ops. It performs
1118  * the necessary return value validation. If the Secure Payload is UP and
1119  * migrate capable, it returns, through the 'mpidr' parameter, the MPIDR of the
1120  * CPU on which the Secure Payload is resident. Otherwise the value of the
1121  * parameter on return is undefined.
1122  ******************************************************************************/
1123 int psci_spd_migrate_info(u_register_t *mpidr)
1124 {
1125 	int rc;
1126 
1127 	if ((psci_spd_pm == NULL) || (psci_spd_pm->svc_migrate_info == NULL))
1128 		return PSCI_E_NOT_SUPPORTED;
1129 
1130 	rc = psci_spd_pm->svc_migrate_info(mpidr);
1131 
1132 	assert((rc == PSCI_TOS_UP_MIG_CAP) || (rc == PSCI_TOS_NOT_UP_MIG_CAP) ||
1133 	       (rc == PSCI_TOS_NOT_PRESENT_MP) || (rc == PSCI_E_NOT_SUPPORTED));
1134 
1135 	return rc;
1136 }
1137 
1138 
1139 /*******************************************************************************
1140  * This function prints the state of all power domains present in the
1141  * system
1142  ******************************************************************************/
1143 void psci_print_power_domain_map(void)
1144 {
1145 #if LOG_LEVEL >= LOG_LEVEL_INFO
1146 	unsigned int idx;
1147 	plat_local_state_t state;
1148 	plat_local_state_type_t state_type;
1149 
1150 	/* This array maps to the PSCI_STATE_X definitions in psci.h */
1151 	static const char * const psci_state_type_str[] = {
1152 		"ON",
1153 		"RETENTION",
1154 		"OFF",
1155 	};
1156 
1157 	INFO("PSCI Power Domain Map:\n");
1158 	for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - psci_plat_core_count);
1159 							idx++) {
1160 		state_type = find_local_state_type(
1161 				psci_non_cpu_pd_nodes[idx].local_state);
1162 		INFO("  Domain Node : Level %u, parent_node %u,"
1163 				" State %s (0x%x)\n",
1164 				psci_non_cpu_pd_nodes[idx].level,
1165 				psci_non_cpu_pd_nodes[idx].parent_node,
1166 				psci_state_type_str[state_type],
1167 				psci_non_cpu_pd_nodes[idx].local_state);
1168 	}
1169 
1170 	for (idx = 0; idx < psci_plat_core_count; idx++) {
1171 		state = psci_get_cpu_local_state_by_idx(idx);
1172 		state_type = find_local_state_type(state);
1173 		INFO("  CPU Node : MPID 0x%llx, parent_node %u,"
1174 				" State %s (0x%x)\n",
1175 				(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
1176 				psci_cpu_pd_nodes[idx].parent_node,
1177 				psci_state_type_str[state_type],
1178 				psci_get_cpu_local_state_by_idx(idx));
1179 	}
1180 #endif
1181 }
1182 
1183 /******************************************************************************
1184  * Return whether any secondaries were powered up with a CPU_ON call. A CPU that
1185  * has ever been powered up would have set its MPIDR value to something other
1186  * than PSCI_INVALID_MPIDR. Note that the MPIDR isn't reset back to
1187  * PSCI_INVALID_MPIDR when a CPU is powered down later, so the return value is
1188  * meaningful only when called on the primary CPU during early boot.
1189  *****************************************************************************/
1190 int psci_secondaries_brought_up(void)
1191 {
1192 	unsigned int idx, n_valid = 0U;
1193 
1194 	for (idx = 0U; idx < ARRAY_SIZE(psci_cpu_pd_nodes); idx++) {
1195 		if (psci_cpu_pd_nodes[idx].mpidr != PSCI_INVALID_MPIDR)
1196 			n_valid++;
1197 	}
1198 
1199 	assert(n_valid > 0U);
1200 
1201 	return (n_valid > 1U) ? 1 : 0;
1202 }
1203 
1204 static u_register_t call_cpu_pwr_dwn(unsigned int power_level)
1205 {
1206 	struct cpu_ops *ops = get_cpu_data(cpu_ops_ptr);
1207 
1208 	/* Call the last available power down handler */
1209 	if (power_level > CPU_MAX_PWR_DWN_OPS - 1) {
1210 		power_level = CPU_MAX_PWR_DWN_OPS - 1;
1211 	}
1212 
1213 	assert(ops != NULL);
1214 	assert(ops->pwr_dwn_ops[power_level] != NULL);
1215 
1216 	return ops->pwr_dwn_ops[power_level]();
1217 }
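
/*
 * The power-down handler table has CPU_MAX_PWR_DWN_OPS entries (commonly two:
 * core- and cluster-level). A request at a deeper power_level is clamped to the
 * last entry, so e.g. a level 2 (system) request would fall back to the
 * cluster-level handler.
 */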
1218 
1219 static void prepare_cpu_pwr_dwn(unsigned int power_level)
1220 {
1221 	/* ignore the return, all cpus should behave the same */
1222 	(void)call_cpu_pwr_dwn(power_level);
1223 }
1224 
1225 static void prepare_cpu_pwr_up(unsigned int power_level)
1226 {
1227 	/*
1228 	 * Call the pwr_dwn cpu hook again, indicating that an abandon happened.
1229 	 * The cpu driver is expected to clean up. We ask it to return
1230 	 * PABANDON_ACK to indicate that it has handled this. This is a
1231 	 * heuristic: the value has been chosen such that an unported CPU is
1232 	 * extremely unlikely to return this value.
1233 	 */
1234 	u_register_t ret = call_cpu_pwr_dwn(power_level);
1235 
1236 	/* unreachable on AArch32 so cast down to calm the compiler */
1237 	if (ret != (u_register_t) PABANDON_ACK) {
1238 		panic();
1239 	}
1240 }
1241 
1242 /*******************************************************************************
1243  * Initiate power down sequence, by calling power down operations registered for
1244  * this CPU.
1245  ******************************************************************************/
1246 void psci_pwrdown_cpu_start(unsigned int power_level)
1247 {
1248 #if ENABLE_RUNTIME_INSTRUMENTATION
1249 
1250 	/*
1251 	 * Flush cache line so that even if CPU power down happens
1252 	 * the timestamp update is reflected in memory.
1253 	 */
1254 	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
1255 		RT_INSTR_ENTER_CFLUSH,
1256 		PMF_CACHE_MAINT);
1257 #endif
1258 
1259 #if !HW_ASSISTED_COHERENCY
1260 	/*
1261 	 * Disable data caching and handle the stack's cache maintenance.
1262 	 *
1263 	 * If the core can't automatically exit coherency, the cpu driver needs
1264 	 * to flush caches and exit coherency. We can't do this with data caches
1265 	 * enabled. The cpu driver will decide which caches to flush based on
1266 	 * the power level.
1267 	 *
1268 	 * If automatic coherency management is possible, we can keep data
1269 	 * caches on until the very end and let hardware do cache maintenance.
1270 	 */
1271 	psci_do_pwrdown_cache_maintenance();
1272 #endif
1273 
1274 	/* Initiate the power down sequence by calling into the cpu driver. */
1275 	prepare_cpu_pwr_dwn(power_level);
1276 
1277 #if ENABLE_RUNTIME_INSTRUMENTATION
1278 	PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
1279 		RT_INSTR_EXIT_CFLUSH,
1280 		PMF_NO_CACHE_MAINT);
1281 #endif
1282 }
1283 
1284 /*******************************************************************************
1285  * Finish a terminal power down sequence, ending with a wfi. In case of wakeup
1286  * it will retry the sleep and panic if the wakeup persists.
1287  ******************************************************************************/
1288 void __dead2 psci_pwrdown_cpu_end_terminal(void)
1289 {
1290 #if ERRATA_SME_POWER_DOWN
1291 	/*
1292 	 * Force SME off so that the power down request is not rejected. Getting
1293 	 * here is terminal, so we don't care if we lose context because of
1294 	 * another wakeup.
1295 	 */
1296 	if (is_feat_sme_supported()) {
1297 		write_svcr(0);
1298 		isb();
1299 	}
1300 #endif /* ERRATA_SME_POWER_DOWN */
1301 
1302 	/* ensure write buffer empty */
1303 	dsbsy();
1304 
1305 	/*
1306 	 * Execute a wfi which, in most cases, will allow the power controller
1307 	 * to physically power down this cpu. Under some circumstances that may
1308 	 * be denied. Hopefully this is transient, retrying a few times should
1309 	 * power down.
1310 	 */
1311 	for (int i = 0; i < 32; i++)
1312 		wfi();
1313 
1314 	/* Wake up wasn't transient. System is probably in a bad state. */
1315 	ERROR("Could not power off CPU.\n");
1316 	panic();
1317 }
1318 
1319 /*******************************************************************************
1320  * Finish a non-terminal power down sequence, ending with a wfi. In case of
1321  * wakeup it will unwind any CPU-specific actions and return.
1322  ******************************************************************************/
1323 
1324 void psci_pwrdown_cpu_end_wakeup(unsigned int power_level)
1325 {
1326 	/* ensure write buffer empty */
1327 	dsbsy();
1328 
1329 	/*
1330 	 * Turn the core off. Usually, will be terminal. In some circumstances
1331 	 * the powerdown will be denied and we'll need to unwind.
1332 	 */
1333 	wfi();
1334 
1335 	/*
1336 	 * Waking up does not require hardware-assisted coherency, but every core
1337 	 * that can wake up here happens to have it. The wakeup can happen either
1338 	 * because of errata or a powerdown abandon (pabandon).
1339 	 */
1340 #if !defined(__aarch64__) || !HW_ASSISTED_COHERENCY
1341 	ERROR("AArch32 systems shouldn't wake up.\n");
1342 	panic();
1343 #endif
1344 	/*
1345 	 * Begin unwinding. Everything can be shared with CPU_ON and co later,
1346 	 * except the CPU specific bit. Cores that have hardware-assisted
1347 	 * coherency should be able to handle this.
1348 	 */
1349 	prepare_cpu_pwr_up(power_level);
1350 }
1351 
1352 /*******************************************************************************
1353  * This function invokes the callback 'stop_func()' with the 'mpidr' of each
1354  * online PE. The caller can pass a suitable method to stop a remote core.
1355  *
1356  * 'wait_ms' is the timeout value in milliseconds for the other cores to
1357  * transition to power down state. Passing '0' makes it non-blocking.
1358  *
1359  * The function returns 'PSCI_E_DENIED' if some cores failed to stop within the
1360  * given timeout.
1361  ******************************************************************************/
1362 int psci_stop_other_cores(unsigned int this_cpu_idx, unsigned int wait_ms,
1363 				   void (*stop_func)(u_register_t mpidr))
1364 {
1365 	/* Invoke stop_func for each core */
1366 	for (unsigned int idx = 0U; idx < psci_plat_core_count; idx++) {
1367 		/* skip current CPU */
1368 		if (idx == this_cpu_idx) {
1369 			continue;
1370 		}
1371 
1372 		/* Check if the CPU is ON */
1373 		if (psci_get_aff_info_state_by_idx(idx) == AFF_STATE_ON) {
1374 			(*stop_func)(psci_cpu_pd_nodes[idx].mpidr);
1375 		}
1376 	}
1377 
1378 	/* Need to wait for other cores to shutdown */
1379 	if (wait_ms != 0U) {
1380 		for (uint32_t delay_ms = wait_ms; ((delay_ms != 0U) &&
1381 					(!psci_is_last_on_cpu(this_cpu_idx))); delay_ms--) {
1382 			mdelay(1U);
1383 		}
1384 
1385 		if (!psci_is_last_on_cpu(this_cpu_idx)) {
1386 			WARN("Failed to stop all cores!\n");
1387 			psci_print_power_domain_map();
1388 			return PSCI_E_DENIED;
1389 		}
1390 	}
1391 
1392 	return PSCI_E_SUCCESS;
1393 }
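
/*
 * Usage sketch (illustrative; 'raise_stop_sgi' and 'PLAT_STOP_SGI_ID' are
 * hypothetical names, not part of this file): a platform SYSTEM_RESET2 or
 * debug path might stop its peers with
 *
 *   static void raise_stop_sgi(u_register_t mpidr)  // hypothetical helper
 *   {
 *       plat_ic_raise_el3_sgi(PLAT_STOP_SGI_ID, mpidr);
 *   }
 *   ...
 *   if (psci_stop_other_cores(plat_my_core_pos(), 1000U, raise_stop_sgi) !=
 *       PSCI_E_SUCCESS) {
 *       WARN("Peers did not stop within 1s\n");
 *   }
 */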
1394 
1395 /*******************************************************************************
1396  * This function verifies that all the other cores in the system have been
1397  * turned OFF and the current CPU is the last running CPU in the system.
1398  * Returns true if the current CPU is the last ON CPU or false otherwise.
1399  *
1400  * This API has the following differences from psci_is_last_on_cpu:
1401  *  1. PSCI states are locked
1402  ******************************************************************************/
1403 bool psci_is_last_on_cpu_safe(unsigned int this_core)
1404 {
1405 	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
1406 
1407 	psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);
1408 
1409 	psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1410 
1411 	if (!psci_is_last_on_cpu(this_core)) {
1412 		psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1413 		return false;
1414 	}
1415 
1416 	psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1417 
1418 	return true;
1419 }
1420 
1421 /*******************************************************************************
1422  * This function verifies that all cores in the system have been turned ON.
1423  * Returns true if all CPUs are ON, false otherwise.
1424  *
1425  * This API has following differences with psci_are_all_cpus_on
1426  * This API has the following differences from psci_are_all_cpus_on:
1427  ******************************************************************************/
1428 bool psci_are_all_cpus_on_safe(unsigned int this_core)
1429 {
1430 	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
1431 
1432 	psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);
1433 
1434 	psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1435 
1436 	if (!psci_are_all_cpus_on()) {
1437 		psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1438 		return false;
1439 	}
1440 
1441 	psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1442 
1443 	return true;
1444 }
1445 
1446 /*******************************************************************************
1447  * Safely counts the number of CPUs in the system that are currently in the ON
1448  * or ON_PENDING state.
1449  *
1450  * This function acquires and releases the necessary power domain locks to
1451  * ensure consistency of the CPU state information.
1452  *
1453  * @param this_core The index of the current core making the query.
1454  *
1455  * @return The number of CPUs currently in AFF_STATE_ON or AFF_STATE_ON_PENDING.
1456  ******************************************************************************/
1457 unsigned int psci_num_cpus_running_on_safe(unsigned int this_core)
1458 {
1459 	unsigned int parent_nodes[PLAT_MAX_PWR_LVL] = {0};
1460 	unsigned int no_of_cpus;
1461 
1462 	psci_get_parent_pwr_domain_nodes(this_core, PLAT_MAX_PWR_LVL, parent_nodes);
1463 
1464 	psci_acquire_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1465 
1466 	no_of_cpus = psci_num_cpus_running();
1467 
1468 	psci_release_pwr_domain_locks(PLAT_MAX_PWR_LVL, parent_nodes);
1469 
1470 	return no_of_cpus;
1471 }
1472