xref: /rk3399_ARM-atf/lib/psci/psci_common.c (revision 532ed6183868036e4a4f83cd7a71b93266a3bdb7)
1*532ed618SSoby Mathew /*
2*532ed618SSoby Mathew  * Copyright (c) 2013-2016, ARM Limited and Contributors. All rights reserved.
3*532ed618SSoby Mathew  *
4*532ed618SSoby Mathew  * Redistribution and use in source and binary forms, with or without
5*532ed618SSoby Mathew  * modification, are permitted provided that the following conditions are met:
6*532ed618SSoby Mathew  *
7*532ed618SSoby Mathew  * Redistributions of source code must retain the above copyright notice, this
8*532ed618SSoby Mathew  * list of conditions and the following disclaimer.
9*532ed618SSoby Mathew  *
10*532ed618SSoby Mathew  * Redistributions in binary form must reproduce the above copyright notice,
11*532ed618SSoby Mathew  * this list of conditions and the following disclaimer in the documentation
12*532ed618SSoby Mathew  * and/or other materials provided with the distribution.
13*532ed618SSoby Mathew  *
14*532ed618SSoby Mathew  * Neither the name of ARM nor the names of its contributors may be used
15*532ed618SSoby Mathew  * to endorse or promote products derived from this software without specific
16*532ed618SSoby Mathew  * prior written permission.
17*532ed618SSoby Mathew  *
18*532ed618SSoby Mathew  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
19*532ed618SSoby Mathew  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20*532ed618SSoby Mathew  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21*532ed618SSoby Mathew  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
22*532ed618SSoby Mathew  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23*532ed618SSoby Mathew  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24*532ed618SSoby Mathew  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25*532ed618SSoby Mathew  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26*532ed618SSoby Mathew  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27*532ed618SSoby Mathew  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28*532ed618SSoby Mathew  * POSSIBILITY OF SUCH DAMAGE.
29*532ed618SSoby Mathew  */
30*532ed618SSoby Mathew 
31*532ed618SSoby Mathew #include <arch.h>
32*532ed618SSoby Mathew #include <arch_helpers.h>
33*532ed618SSoby Mathew #include <assert.h>
34*532ed618SSoby Mathew #include <bl_common.h>
35*532ed618SSoby Mathew #include <context.h>
36*532ed618SSoby Mathew #include <context_mgmt.h>
37*532ed618SSoby Mathew #include <debug.h>
38*532ed618SSoby Mathew #include <platform.h>
39*532ed618SSoby Mathew #include <string.h>
40*532ed618SSoby Mathew #include "psci_private.h"
41*532ed618SSoby Mathew 
42*532ed618SSoby Mathew /*
43*532ed618SSoby Mathew  * SPD power management operations, expected to be supplied by the registered
44*532ed618SSoby Mathew  * SPD on successful SP initialization
45*532ed618SSoby Mathew  */
46*532ed618SSoby Mathew const spd_pm_ops_t *psci_spd_pm;
47*532ed618SSoby Mathew 
48*532ed618SSoby Mathew /*
49*532ed618SSoby Mathew  * PSCI requested local power state map. This array is used to store the local
50*532ed618SSoby Mathew  * power states requested by a CPU for power levels from level 1 to
51*532ed618SSoby Mathew  * PLAT_MAX_PWR_LVL. It does not store the requested local power state for power
52*532ed618SSoby Mathew  * level 0 (PSCI_CPU_PWR_LVL) as the requested and the target power state for a
53*532ed618SSoby Mathew  * CPU are the same.
54*532ed618SSoby Mathew  *
55*532ed618SSoby Mathew  * During state coordination, the platform is passed an array containing the
56*532ed618SSoby Mathew  * local states requested for a particular non cpu power domain by each cpu
57*532ed618SSoby Mathew  * within the domain.
58*532ed618SSoby Mathew  *
59*532ed618SSoby Mathew  * TODO: Dense packing of the requested states will cause cache thrashing
60*532ed618SSoby Mathew  * when multiple power domains write to it. If we allocate the requested
61*532ed618SSoby Mathew  * states at each power level in a cache-line aligned per-domain memory,
62*532ed618SSoby Mathew  * the cache thrashing can be avoided.
63*532ed618SSoby Mathew  */
64*532ed618SSoby Mathew static plat_local_state_t
65*532ed618SSoby Mathew 	psci_req_local_pwr_states[PLAT_MAX_PWR_LVL][PLATFORM_CORE_COUNT];
66*532ed618SSoby Mathew 
67*532ed618SSoby Mathew 
68*532ed618SSoby Mathew /*******************************************************************************
69*532ed618SSoby Mathew  * Arrays that hold the platform's power domain tree information for state
70*532ed618SSoby Mathew  * management of power domains.
71*532ed618SSoby Mathew  * Each node in the array 'psci_non_cpu_pd_nodes' corresponds to a power domain
72*532ed618SSoby Mathew  * which is an ancestor of a CPU power domain.
73*532ed618SSoby Mathew  * Each node in the array 'psci_cpu_pd_nodes' corresponds to a cpu power domain
74*532ed618SSoby Mathew  ******************************************************************************/
75*532ed618SSoby Mathew non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS]
76*532ed618SSoby Mathew #if USE_COHERENT_MEM
77*532ed618SSoby Mathew __section("tzfw_coherent_mem")
78*532ed618SSoby Mathew #endif
79*532ed618SSoby Mathew ;
80*532ed618SSoby Mathew 
81*532ed618SSoby Mathew DEFINE_BAKERY_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
82*532ed618SSoby Mathew 
83*532ed618SSoby Mathew cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
84*532ed618SSoby Mathew 
85*532ed618SSoby Mathew /*******************************************************************************
86*532ed618SSoby Mathew  * Pointer to functions exported by the platform to complete power mgmt. ops
87*532ed618SSoby Mathew  ******************************************************************************/
88*532ed618SSoby Mathew const plat_psci_ops_t *psci_plat_pm_ops;
89*532ed618SSoby Mathew 
90*532ed618SSoby Mathew /******************************************************************************
91*532ed618SSoby Mathew  * Check that the maximum power level supported by the platform makes sense
92*532ed618SSoby Mathew  *****************************************************************************/
93*532ed618SSoby Mathew CASSERT(PLAT_MAX_PWR_LVL <= PSCI_MAX_PWR_LVL && \
94*532ed618SSoby Mathew 		PLAT_MAX_PWR_LVL >= PSCI_CPU_PWR_LVL, \
95*532ed618SSoby Mathew 		assert_platform_max_pwrlvl_check);
96*532ed618SSoby Mathew 
97*532ed618SSoby Mathew /*
98*532ed618SSoby Mathew  * The plat_local_state used by the platform is one of these types: RUN,
99*532ed618SSoby Mathew  * RETENTION and OFF. The platform can define further sub-states for each type
100*532ed618SSoby Mathew  * apart from RUN. This categorization is done to verify the sanity of the
101*532ed618SSoby Mathew  * psci_power_state passed by the platform and to print debug information. The
102*532ed618SSoby Mathew  * categorization is done on the basis of the following conditions:
103*532ed618SSoby Mathew  *
104*532ed618SSoby Mathew  * 1. If (plat_local_state == 0) then the category is STATE_TYPE_RUN.
105*532ed618SSoby Mathew  *
106*532ed618SSoby Mathew  * 2. If (0 < plat_local_state <= PLAT_MAX_RET_STATE), then the category is
107*532ed618SSoby Mathew  *    STATE_TYPE_RETN.
108*532ed618SSoby Mathew  *
109*532ed618SSoby Mathew  * 3. If (plat_local_state > PLAT_MAX_RET_STATE), then the category is
110*532ed618SSoby Mathew  *    STATE_TYPE_OFF.
111*532ed618SSoby Mathew  */
typedef enum plat_local_state_type {
	STATE_TYPE_RUN = 0,	/* plat_local_state == 0 */
	STATE_TYPE_RETN,	/* 0 < plat_local_state <= PLAT_MAX_RET_STATE */
	STATE_TYPE_OFF		/* plat_local_state > PLAT_MAX_RET_STATE */
} plat_local_state_type_t;
117*532ed618SSoby Mathew 
/*
 * The macro used to categorize a plat_local_state value:
 *   0                        -> STATE_TYPE_RUN
 *   1 .. PLAT_MAX_RET_STATE  -> STATE_TYPE_RETN
 *   otherwise                -> STATE_TYPE_OFF
 *
 * NOTE: the argument is evaluated more than once, so it must be free of
 * side effects. Every use of the parameter is parenthesized so that
 * compound-expression arguments keep the intended precedence.
 */
#define find_local_state_type(plat_local_state)					\
		((plat_local_state) ? (((plat_local_state) > PLAT_MAX_RET_STATE)\
		? STATE_TYPE_OFF : STATE_TYPE_RETN)				\
		: STATE_TYPE_RUN)
123*532ed618SSoby Mathew 
124*532ed618SSoby Mathew /******************************************************************************
125*532ed618SSoby Mathew  * Check that the maximum retention level supported by the platform is less
126*532ed618SSoby Mathew  * than the maximum off level.
127*532ed618SSoby Mathew  *****************************************************************************/
128*532ed618SSoby Mathew CASSERT(PLAT_MAX_RET_STATE < PLAT_MAX_OFF_STATE, \
129*532ed618SSoby Mathew 		assert_platform_max_off_and_retn_state_check);
130*532ed618SSoby Mathew 
131*532ed618SSoby Mathew /******************************************************************************
132*532ed618SSoby Mathew  * This function ensures that the power state parameter in a CPU_SUSPEND request
133*532ed618SSoby Mathew  * is valid. If so, it returns the requested states for each power level.
134*532ed618SSoby Mathew  *****************************************************************************/
135*532ed618SSoby Mathew int psci_validate_power_state(unsigned int power_state,
136*532ed618SSoby Mathew 			      psci_power_state_t *state_info)
137*532ed618SSoby Mathew {
138*532ed618SSoby Mathew 	/* Check SBZ bits in power state are zero */
139*532ed618SSoby Mathew 	if (psci_check_power_state(power_state))
140*532ed618SSoby Mathew 		return PSCI_E_INVALID_PARAMS;
141*532ed618SSoby Mathew 
142*532ed618SSoby Mathew 	assert(psci_plat_pm_ops->validate_power_state);
143*532ed618SSoby Mathew 
144*532ed618SSoby Mathew 	/* Validate the power_state using platform pm_ops */
145*532ed618SSoby Mathew 	return psci_plat_pm_ops->validate_power_state(power_state, state_info);
146*532ed618SSoby Mathew }
147*532ed618SSoby Mathew 
148*532ed618SSoby Mathew /******************************************************************************
149*532ed618SSoby Mathew  * This function retrieves the `psci_power_state_t` for system suspend from
150*532ed618SSoby Mathew  * the platform.
151*532ed618SSoby Mathew  *****************************************************************************/
152*532ed618SSoby Mathew void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info)
153*532ed618SSoby Mathew {
154*532ed618SSoby Mathew 	/*
155*532ed618SSoby Mathew 	 * Assert that the required pm_ops hook is implemented to ensure that
156*532ed618SSoby Mathew 	 * the capability detected during psci_setup() is valid.
157*532ed618SSoby Mathew 	 */
158*532ed618SSoby Mathew 	assert(psci_plat_pm_ops->get_sys_suspend_power_state);
159*532ed618SSoby Mathew 
160*532ed618SSoby Mathew 	/*
161*532ed618SSoby Mathew 	 * Query the platform for the power_state required for system suspend
162*532ed618SSoby Mathew 	 */
163*532ed618SSoby Mathew 	psci_plat_pm_ops->get_sys_suspend_power_state(state_info);
164*532ed618SSoby Mathew }
165*532ed618SSoby Mathew 
166*532ed618SSoby Mathew /*******************************************************************************
167*532ed618SSoby Mathew  * This function verifies that the all the other cores in the system have been
168*532ed618SSoby Mathew  * turned OFF and the current CPU is the last running CPU in the system.
169*532ed618SSoby Mathew  * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
170*532ed618SSoby Mathew  * otherwise.
171*532ed618SSoby Mathew  ******************************************************************************/
172*532ed618SSoby Mathew unsigned int psci_is_last_on_cpu(void)
173*532ed618SSoby Mathew {
174*532ed618SSoby Mathew 	unsigned int cpu_idx, my_idx = plat_my_core_pos();
175*532ed618SSoby Mathew 
176*532ed618SSoby Mathew 	for (cpu_idx = 0; cpu_idx < PLATFORM_CORE_COUNT; cpu_idx++) {
177*532ed618SSoby Mathew 		if (cpu_idx == my_idx) {
178*532ed618SSoby Mathew 			assert(psci_get_aff_info_state() == AFF_STATE_ON);
179*532ed618SSoby Mathew 			continue;
180*532ed618SSoby Mathew 		}
181*532ed618SSoby Mathew 
182*532ed618SSoby Mathew 		if (psci_get_aff_info_state_by_idx(cpu_idx) != AFF_STATE_OFF)
183*532ed618SSoby Mathew 			return 0;
184*532ed618SSoby Mathew 	}
185*532ed618SSoby Mathew 
186*532ed618SSoby Mathew 	return 1;
187*532ed618SSoby Mathew }
188*532ed618SSoby Mathew 
189*532ed618SSoby Mathew /*******************************************************************************
190*532ed618SSoby Mathew  * Routine to return the maximum power level to traverse to after a cpu has
191*532ed618SSoby Mathew  * been physically powered up. It is expected to be called immediately after
192*532ed618SSoby Mathew  * reset from assembler code.
193*532ed618SSoby Mathew  ******************************************************************************/
194*532ed618SSoby Mathew static unsigned int get_power_on_target_pwrlvl(void)
195*532ed618SSoby Mathew {
196*532ed618SSoby Mathew 	unsigned int pwrlvl;
197*532ed618SSoby Mathew 
198*532ed618SSoby Mathew 	/*
199*532ed618SSoby Mathew 	 * Assume that this cpu was suspended and retrieve its target power
200*532ed618SSoby Mathew 	 * level. If it is invalid then it could only have been turned off
201*532ed618SSoby Mathew 	 * earlier. PLAT_MAX_PWR_LVL will be the highest power level a
202*532ed618SSoby Mathew 	 * cpu can be turned off to.
203*532ed618SSoby Mathew 	 */
204*532ed618SSoby Mathew 	pwrlvl = psci_get_suspend_pwrlvl();
205*532ed618SSoby Mathew 	if (pwrlvl == PSCI_INVALID_PWR_LVL)
206*532ed618SSoby Mathew 		pwrlvl = PLAT_MAX_PWR_LVL;
207*532ed618SSoby Mathew 	return pwrlvl;
208*532ed618SSoby Mathew }
209*532ed618SSoby Mathew 
210*532ed618SSoby Mathew /******************************************************************************
211*532ed618SSoby Mathew  * Helper function to update the requested local power state array. This array
212*532ed618SSoby Mathew  * does not store the requested state for the CPU power level. Hence an
213*532ed618SSoby Mathew  * assertion is added to prevent us from accessing the wrong index.
214*532ed618SSoby Mathew  *****************************************************************************/
215*532ed618SSoby Mathew static void psci_set_req_local_pwr_state(unsigned int pwrlvl,
216*532ed618SSoby Mathew 					 unsigned int cpu_idx,
217*532ed618SSoby Mathew 					 plat_local_state_t req_pwr_state)
218*532ed618SSoby Mathew {
219*532ed618SSoby Mathew 	assert(pwrlvl > PSCI_CPU_PWR_LVL);
220*532ed618SSoby Mathew 	psci_req_local_pwr_states[pwrlvl - 1][cpu_idx] = req_pwr_state;
221*532ed618SSoby Mathew }
222*532ed618SSoby Mathew 
223*532ed618SSoby Mathew /******************************************************************************
224*532ed618SSoby Mathew  * This function initializes the psci_req_local_pwr_states.
225*532ed618SSoby Mathew  *****************************************************************************/
226*532ed618SSoby Mathew void psci_init_req_local_pwr_states(void)
227*532ed618SSoby Mathew {
228*532ed618SSoby Mathew 	/* Initialize the requested state of all non CPU power domains as OFF */
229*532ed618SSoby Mathew 	memset(&psci_req_local_pwr_states, PLAT_MAX_OFF_STATE,
230*532ed618SSoby Mathew 			sizeof(psci_req_local_pwr_states));
231*532ed618SSoby Mathew }
232*532ed618SSoby Mathew 
233*532ed618SSoby Mathew /******************************************************************************
234*532ed618SSoby Mathew  * Helper function to return a reference to an array containing the local power
235*532ed618SSoby Mathew  * states requested by each cpu for a power domain at 'pwrlvl'. The size of the
236*532ed618SSoby Mathew  * array will be the number of cpu power domains of which this power domain is
237*532ed618SSoby Mathew  * an ancestor. These requested states will be used to determine a suitable
238*532ed618SSoby Mathew  * target state for this power domain during psci state coordination. An
239*532ed618SSoby Mathew  * assertion is added to prevent us from accessing the CPU power level.
240*532ed618SSoby Mathew  *****************************************************************************/
241*532ed618SSoby Mathew static plat_local_state_t *psci_get_req_local_pwr_states(unsigned int pwrlvl,
242*532ed618SSoby Mathew 							 unsigned int cpu_idx)
243*532ed618SSoby Mathew {
244*532ed618SSoby Mathew 	assert(pwrlvl > PSCI_CPU_PWR_LVL);
245*532ed618SSoby Mathew 
246*532ed618SSoby Mathew 	return &psci_req_local_pwr_states[pwrlvl - 1][cpu_idx];
247*532ed618SSoby Mathew }
248*532ed618SSoby Mathew 
249*532ed618SSoby Mathew /******************************************************************************
250*532ed618SSoby Mathew  * Helper function to return the current local power state of each power domain
251*532ed618SSoby Mathew  * from the current cpu power domain to its ancestor at the 'end_pwrlvl'. This
252*532ed618SSoby Mathew  * function will be called after a cpu is powered on to find the local state
253*532ed618SSoby Mathew  * each power domain has emerged from.
254*532ed618SSoby Mathew  *****************************************************************************/
static void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
					     psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	plat_local_state_t *pd_state = target_state->pwr_domain_state;

	/* The CPU level state lives in per-cpu data, not in the node tree */
	pd_state[PSCI_CPU_PWR_LVL] = psci_get_cpu_local_state();
	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local power state from node to state_info */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
#if !USE_COHERENT_MEM
		/*
		 * If using normal memory for psci_non_cpu_pd_nodes, we need
		 * to flush before reading the local power state as another
		 * cpu in the same power domain could have updated it and this
		 * code runs before caches are enabled.
		 */
		flush_dcache_range(
				(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
		pd_state[lvl] =	psci_non_cpu_pd_nodes[parent_idx].local_state;
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Levels above 'end_pwrlvl' were not affected; set them to RUN */
	for (; lvl <= PLAT_MAX_PWR_LVL; lvl++)
		target_state->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;
}
285*532ed618SSoby Mathew 
286*532ed618SSoby Mathew /******************************************************************************
287*532ed618SSoby Mathew  * Helper function to set the target local power state that each power domain
288*532ed618SSoby Mathew  * from the current cpu power domain to its ancestor at the 'end_pwrlvl' will
289*532ed618SSoby Mathew  * enter. This function will be called after coordination of requested power
290*532ed618SSoby Mathew  * states has been done for each power level.
291*532ed618SSoby Mathew  *****************************************************************************/
static void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
					const psci_power_state_t *target_state)
{
	unsigned int parent_idx, lvl;
	const plat_local_state_t *pd_state = target_state->pwr_domain_state;

	/* The CPU level state is stored in per-cpu data, not in the tree */
	psci_set_cpu_local_state(pd_state[PSCI_CPU_PWR_LVL]);

	/*
	 * Need to flush as local_state will be accessed with Data Cache
	 * disabled during power on
	 */
	flush_cpu_data(psci_svc_cpu_data.local_state);

	parent_idx = psci_cpu_pd_nodes[plat_my_core_pos()].parent_node;

	/* Copy the local_state from state_info */
	for (lvl = 1; lvl <= end_pwrlvl; lvl++) {
		psci_non_cpu_pd_nodes[parent_idx].local_state =	pd_state[lvl];
#if !USE_COHERENT_MEM
		/*
		 * Flush the updated node so that CPUs running with caches
		 * disabled (e.g. entering/leaving a power down state)
		 * observe the new local_state.
		 */
		flush_dcache_range(
				(uintptr_t)&psci_non_cpu_pd_nodes[parent_idx],
				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}
}
319*532ed618SSoby Mathew 
320*532ed618SSoby Mathew 
321*532ed618SSoby Mathew /*******************************************************************************
322*532ed618SSoby Mathew  * PSCI helper function to get the parent nodes corresponding to a cpu_index.
323*532ed618SSoby Mathew  ******************************************************************************/
324*532ed618SSoby Mathew void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
325*532ed618SSoby Mathew 				      unsigned int end_lvl,
326*532ed618SSoby Mathew 				      unsigned int node_index[])
327*532ed618SSoby Mathew {
328*532ed618SSoby Mathew 	unsigned int parent_node = psci_cpu_pd_nodes[cpu_idx].parent_node;
329*532ed618SSoby Mathew 	int i;
330*532ed618SSoby Mathew 
331*532ed618SSoby Mathew 	for (i = PSCI_CPU_PWR_LVL + 1; i <= end_lvl; i++) {
332*532ed618SSoby Mathew 		*node_index++ = parent_node;
333*532ed618SSoby Mathew 		parent_node = psci_non_cpu_pd_nodes[parent_node].parent_node;
334*532ed618SSoby Mathew 	}
335*532ed618SSoby Mathew }
336*532ed618SSoby Mathew 
337*532ed618SSoby Mathew /******************************************************************************
338*532ed618SSoby Mathew  * This function is invoked post CPU power up and initialization. It sets the
339*532ed618SSoby Mathew  * affinity info state, target power state and requested power state for the
340*532ed618SSoby Mathew  * current CPU and all its ancestor power domains to RUN.
341*532ed618SSoby Mathew  *****************************************************************************/
void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl)
{
	unsigned int parent_idx, cpu_idx = plat_my_core_pos(), lvl;
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/* Reset the local_state to RUN for the non cpu power domains. */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {
		psci_non_cpu_pd_nodes[parent_idx].local_state =
				PSCI_LOCAL_STATE_RUN;
#if !USE_COHERENT_MEM
		/*
		 * Flush the updated node so that CPUs running with caches
		 * disabled observe the new local_state.
		 */
		flush_dcache_range(
				(uintptr_t) &psci_non_cpu_pd_nodes[parent_idx],
				sizeof(psci_non_cpu_pd_nodes[parent_idx]));
#endif
		/* This CPU now requests RUN for this ancestor power domain */
		psci_set_req_local_pwr_state(lvl,
					     cpu_idx,
					     PSCI_LOCAL_STATE_RUN);
		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/* Set the affinity info state to ON */
	psci_set_aff_info_state(AFF_STATE_ON);

	psci_set_cpu_local_state(PSCI_LOCAL_STATE_RUN);
	/* Per-cpu data is read with caches disabled during warm boot */
	flush_cpu_data(psci_svc_cpu_data);
}
368*532ed618SSoby Mathew 
369*532ed618SSoby Mathew /******************************************************************************
370*532ed618SSoby Mathew  * This function is passed the local power states requested for each power
371*532ed618SSoby Mathew  * domain (state_info) between the current CPU domain and its ancestors until
372*532ed618SSoby Mathew  * the target power level (end_pwrlvl). It updates the array of requested power
373*532ed618SSoby Mathew  * states with this information.
374*532ed618SSoby Mathew  *
375*532ed618SSoby Mathew  * Then, for each level (apart from the CPU level) until the 'end_pwrlvl', it
376*532ed618SSoby Mathew  * retrieves the states requested by all the cpus of which the power domain at
377*532ed618SSoby Mathew  * that level is an ancestor. It passes this information to the platform to
378*532ed618SSoby Mathew  * coordinate and return the target power state. If the target state for a level
379*532ed618SSoby Mathew  * is RUN then subsequent levels are not considered. At the CPU level, state
380*532ed618SSoby Mathew  * coordination is not required. Hence, the requested and the target states are
381*532ed618SSoby Mathew  * the same.
382*532ed618SSoby Mathew  *
383*532ed618SSoby Mathew  * The 'state_info' is updated with the target state for each level between the
384*532ed618SSoby Mathew  * CPU and the 'end_pwrlvl' and returned to the caller.
385*532ed618SSoby Mathew  *
386*532ed618SSoby Mathew  * This function will only be invoked with data cache enabled and while
387*532ed618SSoby Mathew  * powering down a core.
388*532ed618SSoby Mathew  *****************************************************************************/
void psci_do_state_coordination(unsigned int end_pwrlvl,
				psci_power_state_t *state_info)
{
	unsigned int lvl, parent_idx, cpu_idx = plat_my_core_pos();
	unsigned int start_idx, ncpus;
	plat_local_state_t target_state, *req_states;

	assert(end_pwrlvl <= PLAT_MAX_PWR_LVL);
	parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;

	/*
	 * For level 0 (the CPU itself) no coordination is needed: the
	 * requested state is the target state, so start at level 1.
	 */
	for (lvl = PSCI_CPU_PWR_LVL + 1; lvl <= end_pwrlvl; lvl++) {

		/* First update the requested power state */
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);

		/* Get the requested power states for this power level */
		start_idx = psci_non_cpu_pd_nodes[parent_idx].cpu_start_idx;
		req_states = psci_get_req_local_pwr_states(lvl, start_idx);

		/*
		 * Let the platform coordinate amongst the requested states at
		 * this power level and return the target local power state.
		 */
		ncpus = psci_non_cpu_pd_nodes[parent_idx].ncpus;
		target_state = plat_get_target_pwr_state(lvl,
							 req_states,
							 ncpus);

		state_info->pwr_domain_state[lvl] = target_state;

		/* Break early if the negotiated target power state is RUN */
		if (is_local_state_run(state_info->pwr_domain_state[lvl]))
			break;

		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
	}

	/*
	 * This is for cases when we break out of the above loop early because
	 * the target power state is RUN at a power level < end_pwrlvl.
	 * We update the requested power state from state_info and then
	 * set the target state as RUN.
	 */
	for (lvl = lvl + 1; lvl <= end_pwrlvl; lvl++) {
		psci_set_req_local_pwr_state(lvl, cpu_idx,
					     state_info->pwr_domain_state[lvl]);
		state_info->pwr_domain_state[lvl] = PSCI_LOCAL_STATE_RUN;

	}

	/* Update the target state in the power domain nodes */
	psci_set_target_local_pwr_states(end_pwrlvl, state_info);
}
445*532ed618SSoby Mathew 
446*532ed618SSoby Mathew /******************************************************************************
447*532ed618SSoby Mathew  * This function validates a suspend request by making sure that if a standby
448*532ed618SSoby Mathew  * state is requested then no power level is turned off and the highest power
449*532ed618SSoby Mathew  * level is placed in a standby/retention state.
450*532ed618SSoby Mathew  *
451*532ed618SSoby Mathew  * It also ensures that the state level X will enter is not shallower than the
452*532ed618SSoby Mathew  * state level X + 1 will enter.
453*532ed618SSoby Mathew  *
454*532ed618SSoby Mathew  * This validation will be enabled only for DEBUG builds as the platform is
455*532ed618SSoby Mathew  * expected to perform these validations as well.
456*532ed618SSoby Mathew  *****************************************************************************/
457*532ed618SSoby Mathew int psci_validate_suspend_req(const psci_power_state_t *state_info,
458*532ed618SSoby Mathew 			      unsigned int is_power_down_state)
459*532ed618SSoby Mathew {
460*532ed618SSoby Mathew 	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
461*532ed618SSoby Mathew 	plat_local_state_t state;
462*532ed618SSoby Mathew 	plat_local_state_type_t req_state_type, deepest_state_type;
463*532ed618SSoby Mathew 	int i;
464*532ed618SSoby Mathew 
465*532ed618SSoby Mathew 	/* Find the target suspend power level */
466*532ed618SSoby Mathew 	target_lvl = psci_find_target_suspend_lvl(state_info);
467*532ed618SSoby Mathew 	if (target_lvl == PSCI_INVALID_PWR_LVL)
468*532ed618SSoby Mathew 		return PSCI_E_INVALID_PARAMS;
469*532ed618SSoby Mathew 
470*532ed618SSoby Mathew 	/* All power domain levels are in a RUN state to begin with */
471*532ed618SSoby Mathew 	deepest_state_type = STATE_TYPE_RUN;
472*532ed618SSoby Mathew 
473*532ed618SSoby Mathew 	for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
474*532ed618SSoby Mathew 		state = state_info->pwr_domain_state[i];
475*532ed618SSoby Mathew 		req_state_type = find_local_state_type(state);
476*532ed618SSoby Mathew 
477*532ed618SSoby Mathew 		/*
478*532ed618SSoby Mathew 		 * While traversing from the highest power level to the lowest,
479*532ed618SSoby Mathew 		 * the state requested for lower levels has to be the same or
480*532ed618SSoby Mathew 		 * deeper i.e. equal to or greater than the state at the higher
481*532ed618SSoby Mathew 		 * levels. If this condition is true, then the requested state
482*532ed618SSoby Mathew 		 * becomes the deepest state encountered so far.
483*532ed618SSoby Mathew 		 */
484*532ed618SSoby Mathew 		if (req_state_type < deepest_state_type)
485*532ed618SSoby Mathew 			return PSCI_E_INVALID_PARAMS;
486*532ed618SSoby Mathew 		deepest_state_type = req_state_type;
487*532ed618SSoby Mathew 	}
488*532ed618SSoby Mathew 
489*532ed618SSoby Mathew 	/* Find the highest off power level */
490*532ed618SSoby Mathew 	max_off_lvl = psci_find_max_off_lvl(state_info);
491*532ed618SSoby Mathew 
492*532ed618SSoby Mathew 	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
493*532ed618SSoby Mathew 	max_retn_lvl = PSCI_INVALID_PWR_LVL;
494*532ed618SSoby Mathew 	if (target_lvl != max_off_lvl)
495*532ed618SSoby Mathew 		max_retn_lvl = target_lvl;
496*532ed618SSoby Mathew 
497*532ed618SSoby Mathew 	/*
498*532ed618SSoby Mathew 	 * If this is not a request for a power down state then max off level
499*532ed618SSoby Mathew 	 * has to be invalid and max retention level has to be a valid power
500*532ed618SSoby Mathew 	 * level.
501*532ed618SSoby Mathew 	 */
502*532ed618SSoby Mathew 	if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
503*532ed618SSoby Mathew 				    max_retn_lvl == PSCI_INVALID_PWR_LVL))
504*532ed618SSoby Mathew 		return PSCI_E_INVALID_PARAMS;
505*532ed618SSoby Mathew 
506*532ed618SSoby Mathew 	return PSCI_E_SUCCESS;
507*532ed618SSoby Mathew }
508*532ed618SSoby Mathew 
509*532ed618SSoby Mathew /******************************************************************************
510*532ed618SSoby Mathew  * This function finds the highest power level which will be powered down
511*532ed618SSoby Mathew  * amongst all the power levels specified in the 'state_info' structure
512*532ed618SSoby Mathew  *****************************************************************************/
513*532ed618SSoby Mathew unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info)
514*532ed618SSoby Mathew {
515*532ed618SSoby Mathew 	int i;
516*532ed618SSoby Mathew 
517*532ed618SSoby Mathew 	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
518*532ed618SSoby Mathew 		if (is_local_state_off(state_info->pwr_domain_state[i]))
519*532ed618SSoby Mathew 			return i;
520*532ed618SSoby Mathew 	}
521*532ed618SSoby Mathew 
522*532ed618SSoby Mathew 	return PSCI_INVALID_PWR_LVL;
523*532ed618SSoby Mathew }
524*532ed618SSoby Mathew 
525*532ed618SSoby Mathew /******************************************************************************
526*532ed618SSoby Mathew  * This functions finds the level of the highest power domain which will be
527*532ed618SSoby Mathew  * placed in a low power state during a suspend operation.
528*532ed618SSoby Mathew  *****************************************************************************/
529*532ed618SSoby Mathew unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info)
530*532ed618SSoby Mathew {
531*532ed618SSoby Mathew 	int i;
532*532ed618SSoby Mathew 
533*532ed618SSoby Mathew 	for (i = PLAT_MAX_PWR_LVL; i >= PSCI_CPU_PWR_LVL; i--) {
534*532ed618SSoby Mathew 		if (!is_local_state_run(state_info->pwr_domain_state[i]))
535*532ed618SSoby Mathew 			return i;
536*532ed618SSoby Mathew 	}
537*532ed618SSoby Mathew 
538*532ed618SSoby Mathew 	return PSCI_INVALID_PWR_LVL;
539*532ed618SSoby Mathew }
540*532ed618SSoby Mathew 
541*532ed618SSoby Mathew /*******************************************************************************
542*532ed618SSoby Mathew  * This function is passed a cpu_index and the highest level in the topology
543*532ed618SSoby Mathew  * tree that the operation should be applied to. It picks up locks in order of
544*532ed618SSoby Mathew  * increasing power domain level in the range specified.
545*532ed618SSoby Mathew  ******************************************************************************/
546*532ed618SSoby Mathew void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
547*532ed618SSoby Mathew 				   unsigned int cpu_idx)
548*532ed618SSoby Mathew {
549*532ed618SSoby Mathew 	unsigned int parent_idx = psci_cpu_pd_nodes[cpu_idx].parent_node;
550*532ed618SSoby Mathew 	unsigned int level;
551*532ed618SSoby Mathew 
552*532ed618SSoby Mathew 	/* No locking required for level 0. Hence start locking from level 1 */
553*532ed618SSoby Mathew 	for (level = PSCI_CPU_PWR_LVL + 1; level <= end_pwrlvl; level++) {
554*532ed618SSoby Mathew 		psci_lock_get(&psci_non_cpu_pd_nodes[parent_idx]);
555*532ed618SSoby Mathew 		parent_idx = psci_non_cpu_pd_nodes[parent_idx].parent_node;
556*532ed618SSoby Mathew 	}
557*532ed618SSoby Mathew }
558*532ed618SSoby Mathew 
559*532ed618SSoby Mathew /*******************************************************************************
560*532ed618SSoby Mathew  * This function is passed a cpu_index and the highest level in the topology
561*532ed618SSoby Mathew  * tree that the operation should be applied to. It releases the locks in order
562*532ed618SSoby Mathew  * of decreasing power domain level in the range specified.
563*532ed618SSoby Mathew  ******************************************************************************/
564*532ed618SSoby Mathew void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
565*532ed618SSoby Mathew 				   unsigned int cpu_idx)
566*532ed618SSoby Mathew {
567*532ed618SSoby Mathew 	unsigned int parent_idx, parent_nodes[PLAT_MAX_PWR_LVL] = {0};
568*532ed618SSoby Mathew 	int level;
569*532ed618SSoby Mathew 
570*532ed618SSoby Mathew 	/* Get the parent nodes */
571*532ed618SSoby Mathew 	psci_get_parent_pwr_domain_nodes(cpu_idx, end_pwrlvl, parent_nodes);
572*532ed618SSoby Mathew 
573*532ed618SSoby Mathew 	/* Unlock top down. No unlocking required for level 0. */
574*532ed618SSoby Mathew 	for (level = end_pwrlvl; level >= PSCI_CPU_PWR_LVL + 1; level--) {
575*532ed618SSoby Mathew 		parent_idx = parent_nodes[level - 1];
576*532ed618SSoby Mathew 		psci_lock_release(&psci_non_cpu_pd_nodes[parent_idx]);
577*532ed618SSoby Mathew 	}
578*532ed618SSoby Mathew }
579*532ed618SSoby Mathew 
580*532ed618SSoby Mathew /*******************************************************************************
581*532ed618SSoby Mathew  * Simple routine to determine whether a mpidr is valid or not.
582*532ed618SSoby Mathew  ******************************************************************************/
583*532ed618SSoby Mathew int psci_validate_mpidr(u_register_t mpidr)
584*532ed618SSoby Mathew {
585*532ed618SSoby Mathew 	if (plat_core_pos_by_mpidr(mpidr) < 0)
586*532ed618SSoby Mathew 		return PSCI_E_INVALID_PARAMS;
587*532ed618SSoby Mathew 
588*532ed618SSoby Mathew 	return PSCI_E_SUCCESS;
589*532ed618SSoby Mathew }
590*532ed618SSoby Mathew 
591*532ed618SSoby Mathew /*******************************************************************************
592*532ed618SSoby Mathew  * This function determines the full entrypoint information for the requested
593*532ed618SSoby Mathew  * PSCI entrypoint on power on/resume and returns it.
594*532ed618SSoby Mathew  ******************************************************************************/
595*532ed618SSoby Mathew static int psci_get_ns_ep_info(entry_point_info_t *ep,
596*532ed618SSoby Mathew 			       uintptr_t entrypoint,
597*532ed618SSoby Mathew 			       u_register_t context_id)
598*532ed618SSoby Mathew {
599*532ed618SSoby Mathew 	u_register_t ep_attr, sctlr;
600*532ed618SSoby Mathew 	unsigned int daif, ee, mode;
601*532ed618SSoby Mathew 	u_register_t ns_scr_el3 = read_scr_el3();
602*532ed618SSoby Mathew 	u_register_t ns_sctlr_el1 = read_sctlr_el1();
603*532ed618SSoby Mathew 
604*532ed618SSoby Mathew 	sctlr = ns_scr_el3 & SCR_HCE_BIT ? read_sctlr_el2() : ns_sctlr_el1;
605*532ed618SSoby Mathew 	ee = 0;
606*532ed618SSoby Mathew 
607*532ed618SSoby Mathew 	ep_attr = NON_SECURE | EP_ST_DISABLE;
608*532ed618SSoby Mathew 	if (sctlr & SCTLR_EE_BIT) {
609*532ed618SSoby Mathew 		ep_attr |= EP_EE_BIG;
610*532ed618SSoby Mathew 		ee = 1;
611*532ed618SSoby Mathew 	}
612*532ed618SSoby Mathew 	SET_PARAM_HEAD(ep, PARAM_EP, VERSION_1, ep_attr);
613*532ed618SSoby Mathew 
614*532ed618SSoby Mathew 	ep->pc = entrypoint;
615*532ed618SSoby Mathew 	memset(&ep->args, 0, sizeof(ep->args));
616*532ed618SSoby Mathew 	ep->args.arg0 = context_id;
617*532ed618SSoby Mathew 
618*532ed618SSoby Mathew 	/*
619*532ed618SSoby Mathew 	 * Figure out whether the cpu enters the non-secure address space
620*532ed618SSoby Mathew 	 * in aarch32 or aarch64
621*532ed618SSoby Mathew 	 */
622*532ed618SSoby Mathew 	if (ns_scr_el3 & SCR_RW_BIT) {
623*532ed618SSoby Mathew 
624*532ed618SSoby Mathew 		/*
625*532ed618SSoby Mathew 		 * Check whether a Thumb entry point has been provided for an
626*532ed618SSoby Mathew 		 * aarch64 EL
627*532ed618SSoby Mathew 		 */
628*532ed618SSoby Mathew 		if (entrypoint & 0x1)
629*532ed618SSoby Mathew 			return PSCI_E_INVALID_ADDRESS;
630*532ed618SSoby Mathew 
631*532ed618SSoby Mathew 		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE_EL2 : MODE_EL1;
632*532ed618SSoby Mathew 
633*532ed618SSoby Mathew 		ep->spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
634*532ed618SSoby Mathew 	} else {
635*532ed618SSoby Mathew 
636*532ed618SSoby Mathew 		mode = ns_scr_el3 & SCR_HCE_BIT ? MODE32_hyp : MODE32_svc;
637*532ed618SSoby Mathew 
638*532ed618SSoby Mathew 		/*
639*532ed618SSoby Mathew 		 * TODO: Choose async. exception bits if HYP mode is not
640*532ed618SSoby Mathew 		 * implemented according to the values of SCR.{AW, FW} bits
641*532ed618SSoby Mathew 		 */
642*532ed618SSoby Mathew 		daif = DAIF_ABT_BIT | DAIF_IRQ_BIT | DAIF_FIQ_BIT;
643*532ed618SSoby Mathew 
644*532ed618SSoby Mathew 		ep->spsr = SPSR_MODE32(mode, entrypoint & 0x1, ee, daif);
645*532ed618SSoby Mathew 	}
646*532ed618SSoby Mathew 
647*532ed618SSoby Mathew 	return PSCI_E_SUCCESS;
648*532ed618SSoby Mathew }
649*532ed618SSoby Mathew 
650*532ed618SSoby Mathew /*******************************************************************************
651*532ed618SSoby Mathew  * This function validates the entrypoint with the platform layer if the
652*532ed618SSoby Mathew  * appropriate pm_ops hook is exported by the platform and returns the
653*532ed618SSoby Mathew  * 'entry_point_info'.
654*532ed618SSoby Mathew  ******************************************************************************/
655*532ed618SSoby Mathew int psci_validate_entry_point(entry_point_info_t *ep,
656*532ed618SSoby Mathew 			      uintptr_t entrypoint,
657*532ed618SSoby Mathew 			      u_register_t context_id)
658*532ed618SSoby Mathew {
659*532ed618SSoby Mathew 	int rc;
660*532ed618SSoby Mathew 
661*532ed618SSoby Mathew 	/* Validate the entrypoint using platform psci_ops */
662*532ed618SSoby Mathew 	if (psci_plat_pm_ops->validate_ns_entrypoint) {
663*532ed618SSoby Mathew 		rc = psci_plat_pm_ops->validate_ns_entrypoint(entrypoint);
664*532ed618SSoby Mathew 		if (rc != PSCI_E_SUCCESS)
665*532ed618SSoby Mathew 			return PSCI_E_INVALID_ADDRESS;
666*532ed618SSoby Mathew 	}
667*532ed618SSoby Mathew 
668*532ed618SSoby Mathew 	/*
669*532ed618SSoby Mathew 	 * Verify and derive the re-entry information for
670*532ed618SSoby Mathew 	 * the non-secure world from the non-secure state from
671*532ed618SSoby Mathew 	 * where this call originated.
672*532ed618SSoby Mathew 	 */
673*532ed618SSoby Mathew 	rc = psci_get_ns_ep_info(ep, entrypoint, context_id);
674*532ed618SSoby Mathew 	return rc;
675*532ed618SSoby Mathew }
676*532ed618SSoby Mathew 
677*532ed618SSoby Mathew /*******************************************************************************
678*532ed618SSoby Mathew  * Generic handler which is called when a cpu is physically powered on. It
679*532ed618SSoby Mathew  * traverses the node information and finds the highest power level powered
680*532ed618SSoby Mathew  * off and performs generic, architectural, platform setup and state management
681*532ed618SSoby Mathew  * to power on that power level and power levels below it.
682*532ed618SSoby Mathew  * e.g. For a cpu that's been powered on, it will call the platform specific
683*532ed618SSoby Mathew  * code to enable the gic cpu interface and for a cluster it will enable
684*532ed618SSoby Mathew  * coherency at the interconnect level in addition to gic cpu interface.
685*532ed618SSoby Mathew  ******************************************************************************/
686*532ed618SSoby Mathew void psci_power_up_finish(void)
687*532ed618SSoby Mathew {
688*532ed618SSoby Mathew 	unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
689*532ed618SSoby Mathew 	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };
690*532ed618SSoby Mathew 
691*532ed618SSoby Mathew 	/*
692*532ed618SSoby Mathew 	 * Verify that we have been explicitly turned ON or resumed from
693*532ed618SSoby Mathew 	 * suspend.
694*532ed618SSoby Mathew 	 */
695*532ed618SSoby Mathew 	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
696*532ed618SSoby Mathew 		ERROR("Unexpected affinity info state");
697*532ed618SSoby Mathew 		panic();
698*532ed618SSoby Mathew 	}
699*532ed618SSoby Mathew 
700*532ed618SSoby Mathew 	/*
701*532ed618SSoby Mathew 	 * Get the maximum power domain level to traverse to after this cpu
702*532ed618SSoby Mathew 	 * has been physically powered up.
703*532ed618SSoby Mathew 	 */
704*532ed618SSoby Mathew 	end_pwrlvl = get_power_on_target_pwrlvl();
705*532ed618SSoby Mathew 
706*532ed618SSoby Mathew 	/*
707*532ed618SSoby Mathew 	 * This function acquires the lock corresponding to each power level so
708*532ed618SSoby Mathew 	 * that by the time all locks are taken, the system topology is snapshot
709*532ed618SSoby Mathew 	 * and state management can be done safely.
710*532ed618SSoby Mathew 	 */
711*532ed618SSoby Mathew 	psci_acquire_pwr_domain_locks(end_pwrlvl,
712*532ed618SSoby Mathew 				      cpu_idx);
713*532ed618SSoby Mathew 
714*532ed618SSoby Mathew #if ENABLE_PSCI_STAT
715*532ed618SSoby Mathew 	/*
716*532ed618SSoby Mathew 	 * Capture power up time-stamp.
717*532ed618SSoby Mathew 	 * No cache maintenance is required as caches are off
718*532ed618SSoby Mathew 	 * and writes are direct to the main memory.
719*532ed618SSoby Mathew 	 */
720*532ed618SSoby Mathew 	PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_EXIT_LOW_PWR,
721*532ed618SSoby Mathew 		PMF_NO_CACHE_MAINT);
722*532ed618SSoby Mathew #endif
723*532ed618SSoby Mathew 
724*532ed618SSoby Mathew 	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);
725*532ed618SSoby Mathew 
726*532ed618SSoby Mathew 	/*
727*532ed618SSoby Mathew 	 * This CPU could be resuming from suspend or it could have just been
728*532ed618SSoby Mathew 	 * turned on. To distinguish between these 2 cases, we examine the
729*532ed618SSoby Mathew 	 * affinity state of the CPU:
730*532ed618SSoby Mathew 	 *  - If the affinity state is ON_PENDING then it has just been
731*532ed618SSoby Mathew 	 *    turned on.
732*532ed618SSoby Mathew 	 *  - Else it is resuming from suspend.
733*532ed618SSoby Mathew 	 *
734*532ed618SSoby Mathew 	 * Depending on the type of warm reset identified, choose the right set
735*532ed618SSoby Mathew 	 * of power management handler and perform the generic, architecture
736*532ed618SSoby Mathew 	 * and platform specific handling.
737*532ed618SSoby Mathew 	 */
738*532ed618SSoby Mathew 	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
739*532ed618SSoby Mathew 		psci_cpu_on_finish(cpu_idx, &state_info);
740*532ed618SSoby Mathew 	else
741*532ed618SSoby Mathew 		psci_cpu_suspend_finish(cpu_idx, &state_info);
742*532ed618SSoby Mathew 
743*532ed618SSoby Mathew 	/*
744*532ed618SSoby Mathew 	 * Set the requested and target state of this CPU and all the higher
745*532ed618SSoby Mathew 	 * power domains which are ancestors of this CPU to run.
746*532ed618SSoby Mathew 	 */
747*532ed618SSoby Mathew 	psci_set_pwr_domains_to_run(end_pwrlvl);
748*532ed618SSoby Mathew 
749*532ed618SSoby Mathew #if ENABLE_PSCI_STAT
750*532ed618SSoby Mathew 	/*
751*532ed618SSoby Mathew 	 * Update PSCI stats.
752*532ed618SSoby Mathew 	 * Caches are off when writing stats data on the power down path.
753*532ed618SSoby Mathew 	 * Since caches are now enabled, it's necessary to do cache
754*532ed618SSoby Mathew 	 * maintenance before reading that same data.
755*532ed618SSoby Mathew 	 */
756*532ed618SSoby Mathew 	psci_stats_update_pwr_up(end_pwrlvl, &state_info, PMF_CACHE_MAINT);
757*532ed618SSoby Mathew #endif
758*532ed618SSoby Mathew 
759*532ed618SSoby Mathew 	/*
760*532ed618SSoby Mathew 	 * This loop releases the lock corresponding to each power level
761*532ed618SSoby Mathew 	 * in the reverse order to which they were acquired.
762*532ed618SSoby Mathew 	 */
763*532ed618SSoby Mathew 	psci_release_pwr_domain_locks(end_pwrlvl,
764*532ed618SSoby Mathew 				      cpu_idx);
765*532ed618SSoby Mathew }
766*532ed618SSoby Mathew 
767*532ed618SSoby Mathew /*******************************************************************************
768*532ed618SSoby Mathew  * This function initializes the set of hooks that PSCI invokes as part of power
769*532ed618SSoby Mathew  * management operation. The power management hooks are expected to be provided
770*532ed618SSoby Mathew  * by the SPD, after it finishes all its initialization
771*532ed618SSoby Mathew  ******************************************************************************/
772*532ed618SSoby Mathew void psci_register_spd_pm_hook(const spd_pm_ops_t *pm)
773*532ed618SSoby Mathew {
774*532ed618SSoby Mathew 	assert(pm);
775*532ed618SSoby Mathew 	psci_spd_pm = pm;
776*532ed618SSoby Mathew 
777*532ed618SSoby Mathew 	if (pm->svc_migrate)
778*532ed618SSoby Mathew 		psci_caps |= define_psci_cap(PSCI_MIG_AARCH64);
779*532ed618SSoby Mathew 
780*532ed618SSoby Mathew 	if (pm->svc_migrate_info)
781*532ed618SSoby Mathew 		psci_caps |= define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64)
782*532ed618SSoby Mathew 				| define_psci_cap(PSCI_MIG_INFO_TYPE);
783*532ed618SSoby Mathew }
784*532ed618SSoby Mathew 
785*532ed618SSoby Mathew /*******************************************************************************
786*532ed618SSoby Mathew  * This function invokes the migrate info hook in the spd_pm_ops. It performs
787*532ed618SSoby Mathew  * the necessary return value validation. If the Secure Payload is UP and
788*532ed618SSoby Mathew  * migrate capable, it returns the mpidr of the CPU on which the Secure payload
789*532ed618SSoby Mathew  * is resident through the mpidr parameter. Else the value of the parameter on
790*532ed618SSoby Mathew  * return is undefined.
791*532ed618SSoby Mathew  ******************************************************************************/
792*532ed618SSoby Mathew int psci_spd_migrate_info(u_register_t *mpidr)
793*532ed618SSoby Mathew {
794*532ed618SSoby Mathew 	int rc;
795*532ed618SSoby Mathew 
796*532ed618SSoby Mathew 	if (!psci_spd_pm || !psci_spd_pm->svc_migrate_info)
797*532ed618SSoby Mathew 		return PSCI_E_NOT_SUPPORTED;
798*532ed618SSoby Mathew 
799*532ed618SSoby Mathew 	rc = psci_spd_pm->svc_migrate_info(mpidr);
800*532ed618SSoby Mathew 
801*532ed618SSoby Mathew 	assert(rc == PSCI_TOS_UP_MIG_CAP || rc == PSCI_TOS_NOT_UP_MIG_CAP \
802*532ed618SSoby Mathew 		|| rc == PSCI_TOS_NOT_PRESENT_MP || rc == PSCI_E_NOT_SUPPORTED);
803*532ed618SSoby Mathew 
804*532ed618SSoby Mathew 	return rc;
805*532ed618SSoby Mathew }
806*532ed618SSoby Mathew 
807*532ed618SSoby Mathew 
808*532ed618SSoby Mathew /*******************************************************************************
809*532ed618SSoby Mathew  * This function prints the state of all power domains present in the
810*532ed618SSoby Mathew  * system
811*532ed618SSoby Mathew  ******************************************************************************/
812*532ed618SSoby Mathew void psci_print_power_domain_map(void)
813*532ed618SSoby Mathew {
814*532ed618SSoby Mathew #if LOG_LEVEL >= LOG_LEVEL_INFO
815*532ed618SSoby Mathew 	unsigned int idx;
816*532ed618SSoby Mathew 	plat_local_state_t state;
817*532ed618SSoby Mathew 	plat_local_state_type_t state_type;
818*532ed618SSoby Mathew 
819*532ed618SSoby Mathew 	/* This array maps to the PSCI_STATE_X definitions in psci.h */
820*532ed618SSoby Mathew 	static const char * const psci_state_type_str[] = {
821*532ed618SSoby Mathew 		"ON",
822*532ed618SSoby Mathew 		"RETENTION",
823*532ed618SSoby Mathew 		"OFF",
824*532ed618SSoby Mathew 	};
825*532ed618SSoby Mathew 
826*532ed618SSoby Mathew 	INFO("PSCI Power Domain Map:\n");
827*532ed618SSoby Mathew 	for (idx = 0; idx < (PSCI_NUM_PWR_DOMAINS - PLATFORM_CORE_COUNT);
828*532ed618SSoby Mathew 							idx++) {
829*532ed618SSoby Mathew 		state_type = find_local_state_type(
830*532ed618SSoby Mathew 				psci_non_cpu_pd_nodes[idx].local_state);
831*532ed618SSoby Mathew 		INFO("  Domain Node : Level %u, parent_node %d,"
832*532ed618SSoby Mathew 				" State %s (0x%x)\n",
833*532ed618SSoby Mathew 				psci_non_cpu_pd_nodes[idx].level,
834*532ed618SSoby Mathew 				psci_non_cpu_pd_nodes[idx].parent_node,
835*532ed618SSoby Mathew 				psci_state_type_str[state_type],
836*532ed618SSoby Mathew 				psci_non_cpu_pd_nodes[idx].local_state);
837*532ed618SSoby Mathew 	}
838*532ed618SSoby Mathew 
839*532ed618SSoby Mathew 	for (idx = 0; idx < PLATFORM_CORE_COUNT; idx++) {
840*532ed618SSoby Mathew 		state = psci_get_cpu_local_state_by_idx(idx);
841*532ed618SSoby Mathew 		state_type = find_local_state_type(state);
842*532ed618SSoby Mathew 		INFO("  CPU Node : MPID 0x%llx, parent_node %d,"
843*532ed618SSoby Mathew 				" State %s (0x%x)\n",
844*532ed618SSoby Mathew 				(unsigned long long)psci_cpu_pd_nodes[idx].mpidr,
845*532ed618SSoby Mathew 				psci_cpu_pd_nodes[idx].parent_node,
846*532ed618SSoby Mathew 				psci_state_type_str[state_type],
847*532ed618SSoby Mathew 				psci_get_cpu_local_state_by_idx(idx));
848*532ed618SSoby Mathew 	}
849*532ed618SSoby Mathew #endif
850*532ed618SSoby Mathew }
851*532ed618SSoby Mathew 
852*532ed618SSoby Mathew #if ENABLE_PLAT_COMPAT
853*532ed618SSoby Mathew /*******************************************************************************
854*532ed618SSoby Mathew  * PSCI Compatibility helper function to return the 'power_state' parameter of
855*532ed618SSoby Mathew  * the PSCI CPU SUSPEND request for the current CPU. Returns PSCI_INVALID_DATA
856*532ed618SSoby Mathew  * if not invoked within CPU_SUSPEND for the current CPU.
857*532ed618SSoby Mathew  ******************************************************************************/
858*532ed618SSoby Mathew int psci_get_suspend_powerstate(void)
859*532ed618SSoby Mathew {
860*532ed618SSoby Mathew 	/* Sanity check to verify that CPU is within CPU_SUSPEND */
861*532ed618SSoby Mathew 	if (psci_get_aff_info_state() == AFF_STATE_ON &&
862*532ed618SSoby Mathew 		!is_local_state_run(psci_get_cpu_local_state()))
863*532ed618SSoby Mathew 		return psci_power_state_compat[plat_my_core_pos()];
864*532ed618SSoby Mathew 
865*532ed618SSoby Mathew 	return PSCI_INVALID_DATA;
866*532ed618SSoby Mathew }
867*532ed618SSoby Mathew 
868*532ed618SSoby Mathew /*******************************************************************************
869*532ed618SSoby Mathew  * PSCI Compatibility helper function to return the state id of the current
870*532ed618SSoby Mathew  * cpu encoded in the 'power_state' parameter. Returns PSCI_INVALID_DATA
871*532ed618SSoby Mathew  * if not invoked within CPU_SUSPEND for the current CPU.
872*532ed618SSoby Mathew  ******************************************************************************/
873*532ed618SSoby Mathew int psci_get_suspend_stateid(void)
874*532ed618SSoby Mathew {
875*532ed618SSoby Mathew 	unsigned int power_state;
876*532ed618SSoby Mathew 	power_state = psci_get_suspend_powerstate();
877*532ed618SSoby Mathew 	if (power_state != PSCI_INVALID_DATA)
878*532ed618SSoby Mathew 		return psci_get_pstate_id(power_state);
879*532ed618SSoby Mathew 
880*532ed618SSoby Mathew 	return PSCI_INVALID_DATA;
881*532ed618SSoby Mathew }
882*532ed618SSoby Mathew 
883*532ed618SSoby Mathew /*******************************************************************************
884*532ed618SSoby Mathew  * PSCI Compatibility helper function to return the state id encoded in the
885*532ed618SSoby Mathew  * 'power_state' parameter of the CPU specified by 'mpidr'. Returns
886*532ed618SSoby Mathew  * PSCI_INVALID_DATA if the CPU is not in CPU_SUSPEND.
887*532ed618SSoby Mathew  ******************************************************************************/
888*532ed618SSoby Mathew int psci_get_suspend_stateid_by_mpidr(unsigned long mpidr)
889*532ed618SSoby Mathew {
890*532ed618SSoby Mathew 	int cpu_idx = plat_core_pos_by_mpidr(mpidr);
891*532ed618SSoby Mathew 
892*532ed618SSoby Mathew 	if (cpu_idx == -1)
893*532ed618SSoby Mathew 		return PSCI_INVALID_DATA;
894*532ed618SSoby Mathew 
895*532ed618SSoby Mathew 	/* Sanity check to verify that the CPU is in CPU_SUSPEND */
896*532ed618SSoby Mathew 	if (psci_get_aff_info_state_by_idx(cpu_idx) == AFF_STATE_ON &&
897*532ed618SSoby Mathew 		!is_local_state_run(psci_get_cpu_local_state_by_idx(cpu_idx)))
898*532ed618SSoby Mathew 		return psci_get_pstate_id(psci_power_state_compat[cpu_idx]);
899*532ed618SSoby Mathew 
900*532ed618SSoby Mathew 	return PSCI_INVALID_DATA;
901*532ed618SSoby Mathew }
902*532ed618SSoby Mathew 
903*532ed618SSoby Mathew /*******************************************************************************
904*532ed618SSoby Mathew  * This function returns highest affinity level which is in OFF
905*532ed618SSoby Mathew  * state. The affinity instance with which the level is associated is
906*532ed618SSoby Mathew  * determined by the caller.
907*532ed618SSoby Mathew  ******************************************************************************/
908*532ed618SSoby Mathew unsigned int psci_get_max_phys_off_afflvl(void)
909*532ed618SSoby Mathew {
910*532ed618SSoby Mathew 	psci_power_state_t state_info;
911*532ed618SSoby Mathew 
912*532ed618SSoby Mathew 	memset(&state_info, 0, sizeof(state_info));
913*532ed618SSoby Mathew 	psci_get_target_local_pwr_states(PLAT_MAX_PWR_LVL, &state_info);
914*532ed618SSoby Mathew 
915*532ed618SSoby Mathew 	return psci_find_target_suspend_lvl(&state_info);
916*532ed618SSoby Mathew }
917*532ed618SSoby Mathew 
918*532ed618SSoby Mathew /*******************************************************************************
919*532ed618SSoby Mathew  * PSCI Compatibility helper function to return target affinity level requested
920*532ed618SSoby Mathew  * for the CPU_SUSPEND. This function assumes affinity levels correspond to
921*532ed618SSoby Mathew  * power domain levels on the platform.
922*532ed618SSoby Mathew  ******************************************************************************/
923*532ed618SSoby Mathew int psci_get_suspend_afflvl(void)
924*532ed618SSoby Mathew {
925*532ed618SSoby Mathew 	return psci_get_suspend_pwrlvl();
926*532ed618SSoby Mathew }
927*532ed618SSoby Mathew 
928*532ed618SSoby Mathew #endif
929