/*
 * Copyright 2018-2020 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <asm_macros.S>
#include <assert_macros.S>

#include <lib/psci/psci.h>

#include <bl31_data.h>
#include <plat_psci.h>


#define RESET_RETRY_CNT   800
#define PSCI_ABORT_CNT	100
21*dd4268a2SPankaj Gupta#if (SOC_CORE_RELEASE)
22*dd4268a2SPankaj Gupta
.global _psci_cpu_on

/*
 * int _psci_cpu_on(u_register_t core_mask)
 * x0   = target cpu core mask
 *
 * Returns a PSCI_E_* status code in x0 (via cpu_on_done/psci_completed).
 *
 * Called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 *
 */

func _psci_cpu_on
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0			/* keep the mask in x6 across helper calls */

	/* x0   = core mask (lsb)
	 * x6   = core mask (lsb)
	 */

	/* check if core disabled - nonzero w0 means disabled */
	bl   _soc_ck_disabled		/* 0-2 */
	cbnz w0, psci_disabled

	/* check core data area to see if core cannot be turned on
	 * read the core state
	 */
	mov  x0, x6
	bl   _getCoreState		/* 0-5 */
	mov  x9, x0

	/* x6   = core mask (lsb)
	 * x9   = core state (from data area)
	 */

	/* states in which the core cannot be turned on - map each
	 * to the corresponding PSCI error code
	 */
	cmp  x9, #CORE_DISABLED
	mov  x0, #PSCI_E_DISABLED
	b.eq cpu_on_done

	cmp  x9, #CORE_PENDING
	mov  x0, #PSCI_E_ON_PENDING
	b.eq cpu_on_done

	cmp  x9, #CORE_RELEASED
	mov  x0, #PSCI_E_ALREADY_ON
	b.eq cpu_on_done

8:
	/* x6   = core mask (lsb)
	 * x9   = core state (from data area)
	 */

	cmp  x9, #CORE_WFE
	b.eq core_in_wfe
	cmp  x9, #CORE_IN_RESET
	b.eq core_in_reset
	cmp  x9, #CORE_OFF
	b.eq core_is_off
	cmp  x9, #CORE_OFF_PENDING	/* NOTE(review): flags from this cmp are
					 * never tested; by elimination the state
					 * here is CORE_OFF_PENDING and we fall
					 * through unconditionally
					 */

	/* if state == CORE_OFF_PENDING, set abort */
	mov  x0, x6
	mov  x1, #ABORT_FLAG_DATA
	mov  x2, #CORE_ABORT_OP
	bl   _setCoreData		/* 0-3, [13-15] */

	ldr  x3, =PSCI_ABORT_CNT	/* x3 = polling budget */
7:
	/* watch for abort to take effect */
	mov  x0, x6
	bl   _getCoreState		/* 0-5 */
	cmp  x0, #CORE_OFF
	b.eq core_is_off
	cmp  x0, #CORE_PENDING
	mov  x0, #PSCI_E_SUCCESS
	b.eq cpu_on_done

	/* loop til finished */
	sub  x3, x3, #1
	cbnz x3, 7b

	/* if we didn't see either CORE_OFF or CORE_PENDING, then this
	 * core is in CORE_OFF_PENDING - exit with success, as the core will
	 * respond to the abort request
	 */
	mov  x0, #PSCI_E_SUCCESS
	b    cpu_on_done

/* this is where we start up a core out of reset */
core_in_reset:
	/* see if the soc-specific module supports this op */
	ldr  x7, =SOC_CORE_RELEASE
	cbnz  x7, 3f

	mov  x0, #PSCI_E_NOT_SUPPORTED
	b    cpu_on_done

	/* x6   = core mask (lsb) */
3:
	/* set core state in data area */
	mov  x0, x6
	mov  x1, #CORE_PENDING
	bl   _setCoreState   			/* 0-3, [13-15] */

	/* release the core from reset */
	mov   x0, x6
	bl    _soc_core_release 		/* 0-3 */
	mov   x0, #PSCI_E_SUCCESS
	b     cpu_on_done

	/* Start up the core that has been powered-down via CPU_OFF
	 */
core_is_off:
	/* see if the soc-specific module supports this op
	 */
	ldr  x7, =SOC_CORE_RESTART
	cbnz x7, 2f

	mov  x0, #PSCI_E_NOT_SUPPORTED
	b    cpu_on_done

	/* x6   = core mask (lsb) */
2:
	/* set core state in data area */
	mov  x0, x6
	mov  x1, #CORE_WAKEUP
	bl   _setCoreState			/* 0-3, [13-15] */

	/* put the core back into service */
	mov  x0, x6
#if (SOC_CORE_RESTART)
	bl   _soc_core_restart			/* 0-5 */
#endif
	mov  x0, #PSCI_E_SUCCESS
	b    cpu_on_done

/* this is where we release a core that is being held in wfe */
core_in_wfe:
	/* x6   = core mask (lsb) */

	/* set core state in data area */
	mov  x0, x6
	mov  x1, #CORE_PENDING
	bl   _setCoreState			/* 0-3, [13-15] */
	dsb  sy				/* make the state update visible before
					 * waking the target core */
	isb

	/* put the core back into service */
	sev
	sev				/* presumably doubled in case a stale event
					 * consumed the first - TODO confirm */
	isb
	mov  x0, #PSCI_E_SUCCESS

cpu_on_done:
	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_cpu_on
196*dd4268a2SPankaj Gupta
197*dd4268a2SPankaj Gupta#endif
198*dd4268a2SPankaj Gupta
199*dd4268a2SPankaj Gupta
200*dd4268a2SPankaj Gupta#if (SOC_CORE_OFF)
201*dd4268a2SPankaj Gupta
.global _psci_cpu_prep_off
.global _psci_cpu_off_wfi

/*
 * void _psci_cpu_prep_off(u_register_t core_mask)
 * this function performs the SoC-specific programming prior
 * to shutting the core down
 * x0 = core_mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_cpu_prep_off

	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x10, x0			/* x10 = core_mask */

	/* the core does not return from cpu_off, so no need
	 * to save/restore non-volatile registers
	 */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr DAIFSet, #0xF

	/* read cpuectlr and save current value to the per-core data
	 * area so it can be restored when the core is brought back
	 */
	mrs   x4, CORTEX_A72_ECTLR_EL1
	mov   x1, #CPUECTLR_DATA
	mov   x2, x4
	mov   x0, x10
	bl    _setCoreData

	/* remove the core from coherency (clear the SMPEN bit) */
	bic   x4, x4, #CPUECTLR_SMPEN_MASK
	msr   CORTEX_A72_ECTLR_EL1, x4

	/* save scr_el3 to the per-core data area */
	mov  x0, x10
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* x4 = scr_el3 */

	/* secure SGI (FIQ) taken to EL3, set SCR_EL3[FIQ] */
	orr   x4, x4, #SCR_FIQ_MASK
	msr   scr_el3, x4

	/* x10 = core_mask */

	/* prep the core for shutdown */
	mov  x0, x10
	bl   _soc_core_prep_off

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_cpu_prep_off
277*dd4268a2SPankaj Gupta
/*
 * void _psci_cpu_off_wfi(u_register_t core_mask, u_register_t resume_addr)
 *   - this function shuts down the core
 *   - this function does not return!!
 */

func _psci_cpu_off_wfi
	/* save the wakeup address in x29 - usable as scratch here
	 * because this function never returns to its caller
	 */
	mov  x29, x1

	/* x0 = core_mask */

	/* shutdown the core */
	bl   _soc_core_entr_off

	/* branch to resume execution at the saved wakeup address */
	br   x29
endfunc _psci_cpu_off_wfi
296*dd4268a2SPankaj Gupta
297*dd4268a2SPankaj Gupta#endif
298*dd4268a2SPankaj Gupta
299*dd4268a2SPankaj Gupta
300*dd4268a2SPankaj Gupta#if (SOC_CORE_RESTART)
301*dd4268a2SPankaj Gupta
.global _psci_wakeup

/*
 * void _psci_wakeup(u_register_t core_mask)
 * this function performs the SoC-specific programming
 * after a core wakes up from OFF
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_wakeup

	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x4, x0			/* x4 = core mask */

	/* restore scr_el3 from the value saved in _psci_cpu_prep_off */
	mov  x0, x4
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x4 = core mask */

	/* restore CPUECTLR, making sure the SMPEN bit is set so the
	 * core rejoins coherency
	 */
	mov   x0, x4
	mov   x1, #CPUECTLR_DATA
	bl    _getCoreData
	orr   x0, x0, #CPUECTLR_SMPEN_MASK
	msr   CORTEX_A72_ECTLR_EL1, x0

	/* x4 = core mask */

	/* start the core back up */
	mov   x0, x4
	bl   _soc_core_exit_off

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_wakeup
362*dd4268a2SPankaj Gupta
363*dd4268a2SPankaj Gupta#endif
364*dd4268a2SPankaj Gupta
365*dd4268a2SPankaj Gupta
366*dd4268a2SPankaj Gupta#if (SOC_SYSTEM_RESET)
367*dd4268a2SPankaj Gupta
.global _psci_system_reset

/*
 * void _psci_system_reset(void)
 * reset the system - does not return
 */
func _psci_system_reset

	/* system reset is mandatory
	 * system reset is soc-specific
	 * Note: under no circumstances do we return from this call
	 *
	 * tail-branch (b, not bl) to match _psci_system_off: the SoC
	 * routine never returns, so linking x30 serves no purpose
	 */
	b    _soc_sys_reset
endfunc _psci_system_reset
378*dd4268a2SPankaj Gupta
379*dd4268a2SPankaj Gupta#endif
380*dd4268a2SPankaj Gupta
381*dd4268a2SPankaj Gupta
382*dd4268a2SPankaj Gupta#if (SOC_SYSTEM_OFF)
383*dd4268a2SPankaj Gupta
.global _psci_system_off

/*
 * void _psci_system_off(void)
 * power the system off - does not return
 */
func _psci_system_off

	/* system off is mandatory
	 * system off is soc-specific
	 * Note: under no circumstances do we return from this call */
	b    _soc_sys_off
endfunc _psci_system_off
393*dd4268a2SPankaj Gupta
394*dd4268a2SPankaj Gupta#endif
395*dd4268a2SPankaj Gupta
396*dd4268a2SPankaj Gupta
397*dd4268a2SPankaj Gupta#if (SOC_CORE_STANDBY)
398*dd4268a2SPankaj Gupta
.global _psci_core_entr_stdby
.global _psci_core_prep_stdby
.global _psci_core_exit_stdby

/*
 * void _psci_core_entr_stdby(u_register_t core_mask) - this
 * is the fast-path for simple core standby
 */

func _psci_core_entr_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0		/* x5 = core mask */

	/* save scr_el3 to the per-core data area */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* x4 = SCR_EL3
	 * x5 = core mask
	 */

	/* allow interrupts @ EL3 (set SCR_EL3[IRQ] and [FIQ]) */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* x5 = core mask */

	/* put the core into standby */
	mov  x0, x5
	bl   _soc_core_entr_stdby

	/* restore scr_el3 from the saved value */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_core_entr_stdby
447*dd4268a2SPankaj Gupta
/*
 * void _psci_core_prep_stdby(u_register_t core_mask) - this
 * sets up the core to enter standby state thru the normal path
 */

func _psci_core_prep_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0

	/* x5 = core mask */

	/* save scr_el3 to the per-core data area; it is restored
	 * later by _psci_core_exit_stdby
	 */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* allow interrupts @ EL3 (set SCR_EL3[IRQ] and [FIQ]) */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* x5 = core mask */

	/* call for any SoC-specific programming */
	mov  x0, x5
	bl   _soc_core_prep_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_core_prep_stdby
483*dd4268a2SPankaj Gupta
/*
 * void _psci_core_exit_stdby(u_register_t core_mask) - this
 * exits the core from standby state thru the normal path
 */

func _psci_core_exit_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0

	/* x5 = core mask */

	/* restore scr_el3 from the value saved in _psci_core_prep_stdby */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask */

	/* perform any SoC-specific programming after standby state */
	mov  x0, x5
	bl   _soc_core_exit_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_core_exit_stdby
515*dd4268a2SPankaj Gupta
516*dd4268a2SPankaj Gupta#endif
517*dd4268a2SPankaj Gupta
518*dd4268a2SPankaj Gupta
519*dd4268a2SPankaj Gupta#if (SOC_CORE_PWR_DWN)
520*dd4268a2SPankaj Gupta
.global _psci_core_prep_pwrdn
.global _psci_cpu_pwrdn_wfi
.global _psci_core_exit_pwrdn

/*
 * void _psci_core_prep_pwrdn_(u_register_t core_mask)
 * this function prepares the core for power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_core_prep_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0

	/* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr DAIFSet, #0xF

	/* save scr_el3 to the per-core data area; restored later
	 * by _psci_core_exit_pwrdn
	 */
	mov  x0, x6
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* allow interrupts @ EL3 (set SCR_EL3[IRQ] and [FIQ]) */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* save cpuectlr to the per-core data area */
	mov  x0, x6
	mov  x1, #CPUECTLR_DATA
	mrs  x2, CORTEX_A72_ECTLR_EL1
	bl   _setCoreData

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov  x0, x6
	bl  _soc_core_prep_pwrdn

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_core_prep_pwrdn
587*dd4268a2SPankaj Gupta
/*
 * void _psci_cpu_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
 * this function powers down the core
 */

func _psci_cpu_pwrdn_wfi
	/* save the wakeup address in x29 - usable as scratch here
	 * since execution resumes at resume_addr, not in the caller
	 */
	mov  x29, x1

	/* x0 = core mask */

	/* shutdown the core */
	bl   _soc_core_entr_pwrdn

	/* branch to resume execution at the saved wakeup address */
	br   x29
endfunc _psci_cpu_pwrdn_wfi
605*dd4268a2SPankaj Gupta
/*
 * void _psci_core_exit_pwrdn_(u_register_t core_mask)
 * this function cleans up after a core power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_core_exit_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* restore scr_el3 from the value saved in _psci_core_prep_pwrdn */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask */

	/* restore cpuectlr */
	mov  x0, x5
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData
	/* make sure smp is set */
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CORTEX_A72_ECTLR_EL1, x0

	/* x5 = core mask */

	/* SoC-specific cleanup */
	mov  x0, x5
	bl   _soc_core_exit_pwrdn

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_core_exit_pwrdn
663*dd4268a2SPankaj Gupta
664*dd4268a2SPankaj Gupta#endif
665*dd4268a2SPankaj Gupta
666*dd4268a2SPankaj Gupta#if (SOC_CLUSTER_STANDBY)
667*dd4268a2SPankaj Gupta
.global _psci_clstr_prep_stdby
.global _psci_clstr_exit_stdby

/*
 * void _psci_clstr_prep_stdby(u_register_t core_mask) - this
 * sets up the clstr to enter standby state thru the normal path
 */

func _psci_clstr_prep_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0

	/* x5 = core mask */

	/* save scr_el3 to the per-core data area; restored later
	 * by _psci_clstr_exit_stdby
	 */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* allow interrupts @ EL3 (set SCR_EL3[IRQ] and [FIQ]) */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* x5 = core mask */

	/* call for any SoC-specific programming */
	mov  x0, x5
	bl   _soc_clstr_prep_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_clstr_prep_stdby
706*dd4268a2SPankaj Gupta
/*
 * void _psci_clstr_exit_stdby(u_register_t core_mask) - this
 * exits the clstr from standby state thru the normal path
 */

func _psci_clstr_exit_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* restore scr_el3 from the value saved in _psci_clstr_prep_stdby */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask */

	/* perform any SoC-specific programming after standby state */
	mov  x0, x5
	bl   _soc_clstr_exit_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_clstr_exit_stdby
736*dd4268a2SPankaj Gupta
737*dd4268a2SPankaj Gupta#endif
738*dd4268a2SPankaj Gupta
739*dd4268a2SPankaj Gupta#if (SOC_CLUSTER_PWR_DWN)
740*dd4268a2SPankaj Gupta
741*dd4268a2SPankaj Gupta.global _psci_clstr_prep_pwrdn
742*dd4268a2SPankaj Gupta.global _psci_clstr_exit_pwrdn
743*dd4268a2SPankaj Gupta
/*
 * void _psci_clstr_prep_pwrdn_(u_register_t core_mask)
 * this function prepares the cluster+core for power-down:
 *  - masks interrupts at the PE (DAIF), then enables IRQ/FIQ
 *    routing to EL3 so a wakeup source can be taken at EL3
 *  - saves SCR_EL3 and CPUECTLR_EL1 in the per-core data area
 *    for restoration in _psci_clstr_exit_pwrdn
 *  - removes the core from hardware coherency (clears SMPEN)
 *  - performs SoC-specific power-down programming
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_clstr_prep_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0			/* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr DAIFSet, #0xF

	/* save scr_el3 */
	mov  x0, x6
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* allow interrupts @ EL3 */
	/* x4 still holds scr_el3 - assumes _setCoreData preserves
	 * x4/x6 (project helper convention, TODO confirm)
	 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* save cpuectlr so the exit path can restore SMPEN */
	mov  x0, x6
	mov  x1, #CPUECTLR_DATA
	mrs  x2, CORTEX_A72_ECTLR_EL1
	mov  x4, x2			/* keep a working copy in x4 */
	bl   _setCoreData

	/* remove core from coherency (clear CPUECTLR_EL1.SMPEN) */
	bic   x4, x4, #CPUECTLR_SMPEN_MASK
	msr   CORTEX_A72_ECTLR_EL1, x4

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov  x0, x6
	bl  _soc_clstr_prep_pwrdn

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed		/* returns status in x0 */
endfunc _psci_clstr_prep_pwrdn
809*dd4268a2SPankaj Gupta
/*
 * void _psci_clstr_exit_pwrdn_(u_register_t core_mask)
 * this function cleans up after a cluster power-down:
 * restores the SCR_EL3 and CPUECTLR_EL1 values saved by
 * _psci_clstr_prep_pwrdn (re-enabling SMPEN to rejoin hardware
 * coherency), then performs SoC-specific cleanup.
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_clstr_exit_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x4, x0			/* x4 = core mask */

	/* restore scr_el3 from the per-core data area */
	mov  x0, x4
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x4 = core mask - assumes _getCoreData preserves x4
	 * (project helper convention, TODO confirm)
	 */

	/* restore cpuectlr */
	mov  x0, x4
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData
	/* make sure smp is set so the core rejoins coherency */
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CORTEX_A72_ECTLR_EL1, x0

	/* x4 = core mask */

	/* SoC-specific cleanup */
	mov  x0, x4
	bl   _soc_clstr_exit_pwrdn

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed		/* returns status in x0 */
endfunc _psci_clstr_exit_pwrdn
867*dd4268a2SPankaj Gupta
868*dd4268a2SPankaj Gupta#endif
869*dd4268a2SPankaj Gupta
870*dd4268a2SPankaj Gupta#if (SOC_SYSTEM_STANDBY)
871*dd4268a2SPankaj Gupta
872*dd4268a2SPankaj Gupta.global _psci_sys_prep_stdby
873*dd4268a2SPankaj Gupta.global _psci_sys_exit_stdby
874*dd4268a2SPankaj Gupta
/*
 * void _psci_sys_prep_stdby(u_register_t core_mask) - this
 * sets up the system to enter standby state thru the normal path:
 * saves the caller's SCR_EL3 in the per-core data area, enables
 * IRQ/FIQ routing to EL3 (so an interrupt can terminate the wfi),
 * then performs any SoC-specific standby programming.
 * x0 = core mask
 */

func _psci_sys_prep_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* save scr_el3 so _psci_sys_exit_stdby can restore it */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* allow interrupts @ EL3 */
	/* x4 still holds scr_el3 - assumes _setCoreData preserves
	 * x4/x5 (project helper convention, TODO confirm)
	 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* x5 = core mask */

	/* call for any SoC-specific programming */
	mov  x0, x5
	bl   _soc_sys_prep_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb				/* ensure SCR_EL3 write takes effect */
	ret
endfunc _psci_sys_prep_stdby
908*dd4268a2SPankaj Gupta
/*
 * void _psci_sys_exit_stdby(u_register_t core_mask) - this
 * exits the system from standby state thru the normal path:
 * restores the SCR_EL3 value saved by _psci_sys_prep_stdby and
 * performs any SoC-specific post-standby programming.
 * x0 = core mask
 */

func _psci_sys_exit_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0

	/* x5 = core mask */

	/* restore scr_el3 from the per-core data area */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask - assumes _getCoreData preserves x5
	 * (project helper convention, TODO confirm)
	 */

	/* perform any SoC-specific programming after standby state */
	mov  x0, x5
	bl   _soc_sys_exit_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb				/* ensure SCR_EL3 write takes effect */
	ret
endfunc _psci_sys_exit_stdby
940*dd4268a2SPankaj Gupta
941*dd4268a2SPankaj Gupta#endif
942*dd4268a2SPankaj Gupta
943*dd4268a2SPankaj Gupta#if (SOC_SYSTEM_PWR_DWN)
944*dd4268a2SPankaj Gupta
945*dd4268a2SPankaj Gupta.global _psci_sys_prep_pwrdn
946*dd4268a2SPankaj Gupta.global _psci_sys_pwrdn_wfi
947*dd4268a2SPankaj Gupta.global _psci_sys_exit_pwrdn
948*dd4268a2SPankaj Gupta
/*
 * void _psci_sys_prep_pwrdn_(u_register_t core_mask)
 * this function prepares the system+core for power-down:
 *  - masks interrupts at the PE (DAIF), then enables IRQ/FIQ
 *    routing to EL3 so a wakeup source can be taken at EL3
 *  - saves SCR_EL3 and CPUECTLR_EL1 in the per-core data area
 *    for restoration in _psci_sys_exit_pwrdn
 *  - removes the core from hardware coherency (clears SMPEN)
 *  - performs SoC-specific power-down programming
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_sys_prep_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0			/* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr DAIFSet, #0xF

	/* save scr_el3 */
	mov  x0, x6
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* allow interrupts @ EL3 */
	/* x4 still holds scr_el3 - assumes _setCoreData preserves
	 * x4/x6 (project helper convention, TODO confirm)
	 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* save cpuectlr so the exit path can restore SMPEN */
	mov  x0, x6
	mov  x1, #CPUECTLR_DATA
	mrs  x2, CORTEX_A72_ECTLR_EL1
	mov  x4, x2			/* keep a working copy in x4 */
	bl   _setCoreData

	/* remove core from coherency (clear CPUECTLR_EL1.SMPEN) */
	bic   x4, x4, #CPUECTLR_SMPEN_MASK
	msr   CORTEX_A72_ECTLR_EL1, x4

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov  x0, x6
	bl  _soc_sys_prep_pwrdn

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed		/* returns status in x0 */
endfunc _psci_sys_prep_pwrdn
1014*dd4268a2SPankaj Gupta
1015*dd4268a2SPankaj Gupta
/*
 * void _psci_sys_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
 * this function powers down the system via the SoC-specific
 * sequence, then branches to resume_addr when execution resumes
 * x0 = core mask
 * x1 = address to branch to on wakeup
 *
 * NOTE(review): x29 (the frame pointer) is used to carry
 * resume_addr across the call without being saved/restored.
 * It is callee-saved, so it survives _soc_sys_pwrdn_wfi, but the
 * caller's x29 is clobbered - presumably acceptable because this
 * path never returns to the caller normally; confirm against callers.
 */

func _psci_sys_pwrdn_wfi
	/* save the wakeup address in a callee-saved register */
	mov  x29, x1

	/* x0 = core mask */

	/* shutdown the system */
	bl   _soc_sys_pwrdn_wfi

	/* branch to resume execution */
	br   x29
endfunc _psci_sys_pwrdn_wfi
1033*dd4268a2SPankaj Gupta
/*
 * void _psci_sys_exit_pwrdn_(u_register_t core_mask)
 * this function cleans up after a system power-down:
 * restores the SCR_EL3 and CPUECTLR_EL1 values saved by
 * _psci_sys_prep_pwrdn (re-enabling SMPEN to rejoin hardware
 * coherency), then performs SoC-specific cleanup.
 * x0 = core mask
 *
 * Called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_sys_exit_pwrdn

	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x4, x0			/* x4 = core mask */

	/* restore scr_el3 from the per-core data area */
	mov  x0, x4
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData

	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x4 = core mask - assumes _getCoreData preserves x4
	 * (project helper convention, TODO confirm)
	 */

	/* restore cpuectlr */
	mov  x0, x4
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData

	/* make sure smp is set so the core rejoins coherency */
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CORTEX_A72_ECTLR_EL1, x0

	/* x4 = core mask */

	/* SoC-specific cleanup */
	mov  x0, x4
	bl   _soc_sys_exit_pwrdn

	/* restore the aarch32/64 non-volatile registers
	 */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed		/* returns status in x0 */
endfunc _psci_sys_exit_pwrdn
1094*dd4268a2SPankaj Gupta
1095*dd4268a2SPankaj Gupta#endif
1096*dd4268a2SPankaj Gupta
1097*dd4268a2SPankaj Gupta
/* psci std returns - each stub loads a PSCI status code into w0
 * (zero-extended into x0) and exits via the common psci_completed path
 */
func psci_disabled
	ldr  w0, =PSCI_E_DISABLED
	b    psci_completed
endfunc psci_disabled
1103*dd4268a2SPankaj Gupta
1104*dd4268a2SPankaj Gupta
/* return PSCI_E_NOT_PRESENT via the common exit path */
func psci_not_present
	ldr  w0, =PSCI_E_NOT_PRESENT
	b    psci_completed
endfunc psci_not_present
1109*dd4268a2SPankaj Gupta
1110*dd4268a2SPankaj Gupta
/* return PSCI_E_ON_PENDING via the common exit path */
func psci_on_pending
	ldr  w0, =PSCI_E_ON_PENDING
	b    psci_completed
endfunc psci_on_pending
1115*dd4268a2SPankaj Gupta
1116*dd4268a2SPankaj Gupta
/* return PSCI_E_ALREADY_ON via the common exit path */
func psci_already_on
	ldr  w0, =PSCI_E_ALREADY_ON
	b    psci_completed
endfunc psci_already_on
1121*dd4268a2SPankaj Gupta
1122*dd4268a2SPankaj Gupta
/* return PSCI_E_INTERN_FAIL via the common exit path */
func psci_failure
	ldr  w0, =PSCI_E_INTERN_FAIL
	b    psci_completed
endfunc psci_failure
1127*dd4268a2SPankaj Gupta
1128*dd4268a2SPankaj Gupta
/* return PSCI_E_NOT_SUPPORTED via the common exit path */
func psci_unimplemented
	ldr  w0, =PSCI_E_NOT_SUPPORTED
	b    psci_completed
endfunc psci_unimplemented
1133*dd4268a2SPankaj Gupta
1134*dd4268a2SPankaj Gupta
/* return PSCI_E_DENIED via the common exit path */
func psci_denied
	ldr  w0, =PSCI_E_DENIED
	b    psci_completed
endfunc psci_denied
1139*dd4268a2SPankaj Gupta
1140*dd4268a2SPankaj Gupta
/* return PSCI_E_INVALID_PARAMS via the common exit path */
func psci_invalid
	ldr  w0, =PSCI_E_INVALID_PARAMS
	b    psci_completed
endfunc psci_invalid
1145*dd4268a2SPankaj Gupta
1146*dd4268a2SPankaj Gupta
/* return PSCI_E_SUCCESS via the common exit path */
func psci_success
	mov  x0, #PSCI_E_SUCCESS
	/* Branch explicitly instead of falling through to
	 * psci_completed: the 'func' macro places each function in
	 * its own .text.asm.<name> section, so adjacency of
	 * psci_completed in the linked image is a placement accident,
	 * not a guarantee.
	 */
	b    psci_completed
endfunc psci_success
1150*dd4268a2SPankaj Gupta
1151*dd4268a2SPankaj Gupta
/* common exit point for the PSCI return stubs above;
 * simply returns the status code already placed in x0
 */
func psci_completed
	/* x0 = status code */
	ret
endfunc psci_completed
1156