
/*
 * Copyright 2018-2021 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

#include <asm_macros.S>
#include <assert_macros.S>

#include <lib/psci/psci.h>

#include <bl31_data.h>
#include <plat_psci.h>


#define RESET_RETRY_CNT		800
#define PSCI_ABORT_CNT		100
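
/*
 * PSCI_ABORT_CNT bounds the polling loop in _psci_cpu_on that waits for
 * a core in CORE_OFF_PENDING to acknowledge an abort request.
 * RESET_RETRY_CNT is not referenced in this file; it is presumably kept
 * for SoC-level code (an assumption, not verified here).
 */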

#if (SOC_CORE_RELEASE)

.global _psci_cpu_on

/*
 * int _psci_cpu_on(u_register_t core_mask)
 * x0 = target cpu core mask
 *
 * Called from C, so save the non-volatile regs;
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */
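
/*
 * A minimal sketch of a C-side caller (illustrative only - core_pos and
 * the surrounding error handling are assumptions, not part of this file):
 *
 *   int rc = _psci_cpu_on((u_register_t)1 << core_pos);
 *   if (rc != PSCI_E_SUCCESS)
 *           return rc;
 */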

func _psci_cpu_on
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0

	/* x0 = core mask (lsb)
	 * x6 = core mask (lsb)
	 */

	/* check if the core is disabled */
	bl   _soc_ck_disabled		/* 0-2 */
	cbnz w0, psci_disabled

	/* read the core state from the core data area to see
	 * if the core can be turned on
	 */
	mov  x0, x6
	bl   _getCoreState		/* 0-5 */
	mov  x9, x0

	/* x6 = core mask (lsb)
	 * x9 = core state (from data area)
	 */

	cmp  x9, #CORE_DISABLED
	mov  x0, #PSCI_E_DISABLED
	b.eq cpu_on_done

	cmp  x9, #CORE_PENDING
	mov  x0, #PSCI_E_ON_PENDING
	b.eq cpu_on_done

	cmp  x9, #CORE_RELEASED
	mov  x0, #PSCI_E_ALREADY_ON
	b.eq cpu_on_done

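	/* remaining states are handled below:
	 *   CORE_WFE         - release a core held in wfe
	 *   CORE_IN_RESET    - release a core held in reset
	 *   CORE_OFF         - restart a core powered down via CPU_OFF
	 *   CORE_OFF_PENDING - request an abort of the power-down
	 */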
8:
	/* x6 = core mask (lsb)
	 * x9 = core state (from data area)
	 */

	cmp  x9, #CORE_WFE
	b.eq core_in_wfe
	cmp  x9, #CORE_IN_RESET
	b.eq core_in_reset
	cmp  x9, #CORE_OFF
	b.eq core_is_off

	/* otherwise, state == CORE_OFF_PENDING - request an abort
	 * of the power-down
	 */
	mov  x0, x6
	mov  x1, #ABORT_FLAG_DATA
	mov  x2, #CORE_ABORT_OP
	bl   _setCoreData		/* 0-3, [13-15] */

	ldr  x3, =PSCI_ABORT_CNT
7:
	/* watch for the abort to take effect */
	mov  x0, x6
	bl   _getCoreState		/* 0-5 */
	cmp  x0, #CORE_OFF
	b.eq core_is_off
	cmp  x0, #CORE_PENDING
	mov  x0, #PSCI_E_SUCCESS
	b.eq cpu_on_done

	/* loop until finished */
	sub  x3, x3, #1
	cbnz x3, 7b

	/* if we saw neither CORE_OFF nor CORE_PENDING, then this core is
	 * still in CORE_OFF_PENDING - exit with success, as the core will
	 * respond to the abort request
	 */
	mov  x0, #PSCI_E_SUCCESS
	b    cpu_on_done

/* this is where we start up a core held in reset */
core_in_reset:
	/* see if the soc-specific module supports this op */
	ldr  x7, =SOC_CORE_RELEASE
	cbnz x7, 3f

	mov  x0, #PSCI_E_NOT_SUPPORTED
	b    cpu_on_done

	/* x6 = core mask (lsb) */
3:
	/* set the core state in the data area */
	mov  x0, x6
	mov  x1, #CORE_PENDING
	bl   _setCoreState		/* 0-3, [13-15] */

	/* release the core from reset */
	mov  x0, x6
	bl   _soc_core_release		/* 0-3 */
	mov  x0, #PSCI_E_SUCCESS
	b    cpu_on_done

/* this is where we start up a core that was powered down via CPU_OFF */
core_is_off:
	/* see if the soc-specific module supports this op */
	ldr  x7, =SOC_CORE_RESTART
	cbnz x7, 2f

	mov  x0, #PSCI_E_NOT_SUPPORTED
	b    cpu_on_done

	/* x6 = core mask (lsb) */
2:
	/* set the core state in the data area */
	mov  x0, x6
	mov  x1, #CORE_WAKEUP
	bl   _setCoreState		/* 0-3, [13-15] */

	/* put the core back into service */
	mov  x0, x6
#if (SOC_CORE_RESTART)
	bl   _soc_core_restart		/* 0-5 */
#endif
	mov  x0, #PSCI_E_SUCCESS
	b    cpu_on_done

/* this is where we release a core that is being held in wfe */
core_in_wfe:
	/* x6 = core mask (lsb) */

	/* set the core state in the data area */
	mov  x0, x6
	mov  x1, #CORE_PENDING
	bl   _setCoreState		/* 0-3, [13-15] */
	dsb  sy
	isb
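	/* the dsb above guarantees that the CORE_PENDING update is visible
	 * to the target core before the sev below wakes it from wfe
	 */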

	/* put the core back into service */
	sev
	sev
	isb
	mov  x0, #PSCI_E_SUCCESS

cpu_on_done:
	/* restore the non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_cpu_on

#endif


#if (SOC_CORE_OFF)

.global _psci_cpu_prep_off
.global _psci_cpu_off_wfi

/*
 * void _psci_cpu_prep_off(u_register_t core_mask)
 * this function performs the SoC-specific programming prior
 * to shutting the core down
 * x0 = core_mask
 *
 * called from C, so save the non-volatile regs;
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */
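
/* sequence: mask interrupts at this core, save CPUECTLR and drop out of
 * coherency, save SCR_EL3 and route secure FIQ (SGI) to EL3, then call
 * the SoC-specific prep hook
 */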

func _psci_cpu_prep_off

	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x10, x0			/* x10 = core_mask */

	/* Note: the core does not return from CPU_OFF itself, but this
	 * prep function does return to its C caller, so the registers
	 * are saved/restored as usual
	 */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr  DAIFSet, #0xF

	/* read cpuectlr and save the current value */
	mrs  x4, CPUECTLR_EL1
	mov  x1, #CPUECTLR_DATA
	mov  x2, x4
	mov  x0, x10
	bl   _setCoreData

	/* remove the core from coherency */
	bic  x4, x4, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x4
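
	/* clearing CPUECTLR_EL1.SMPEN takes this core out of hardware-managed
	 * coherency; this assumes the data cache is cleaned and disabled as
	 * part of the power-down sequence (expected to be handled by the SoC
	 * hooks, not shown here)
	 */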

	/* save scr_el3 */
	mov  x0, x10
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* x4 = scr_el3 */

	/* route secure SGI (FIQ) to EL3 - set SCR_EL3[FIQ] */
	orr  x4, x4, #SCR_FIQ_MASK
	msr  SCR_EL3, x4

	/* x10 = core_mask */

	/* prep the core for shutdown */
	mov  x0, x10
	bl   _soc_core_prep_off

	/* restore the non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_cpu_prep_off

/*
 * void _psci_cpu_off_wfi(u_register_t core_mask, u_register_t resume_addr)
 *   - this function shuts down the core
 *   - it does not return to its caller; when the core resumes,
 *     execution continues at resume_addr
 */

func _psci_cpu_off_wfi
	/* save the wakeup address */
	mov  x29, x1
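	/* x29 is assumed to be preserved across _soc_core_entr_off (and the
	 * wfi it executes) - the resume branch below depends on it
	 */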

	/* x0 = core_mask */

	/* shut down the core */
	bl   _soc_core_entr_off

	/* branch to resume execution */
	br   x29
endfunc _psci_cpu_off_wfi

#endif


#if (SOC_CORE_RESTART)

.global _psci_wakeup

/*
 * void _psci_wakeup(u_register_t core_mask)
 * this function performs the SoC-specific programming
 * after a core wakes up from OFF
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs;
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */
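
/* restores the SCR_EL3 and CPUECTLR values saved by _psci_cpu_prep_off,
 * re-enters coherency by setting CPUECTLR_EL1.SMPEN, then calls the
 * SoC-specific exit hook
 */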

func _psci_wakeup

	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x4, x0			/* x4 = core mask */

	/* restore scr_el3 */
	mov  x0, x4
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x4 = core mask */

	/* restore CPUECTLR */
	mov  x0, x4
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x0

	/* x4 = core mask */

	/* start the core back up */
	mov  x0, x4
	bl   _soc_core_exit_off

	/* restore the non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_wakeup

#endif


#if (SOC_SYSTEM_RESET)

.global _psci_system_reset

func _psci_system_reset

	/* system reset is mandatory
	 * system reset is soc-specific
	 * Note: under no circumstances do we return from this call
	 */
	bl   _soc_sys_reset
endfunc _psci_system_reset

#endif


#if (SOC_SYSTEM_OFF)

.global _psci_system_off

func _psci_system_off

	/* system off is mandatory
	 * system off is soc-specific
	 * Note: under no circumstances do we return from this call
	 */
	b    _soc_sys_off
endfunc _psci_system_off

#endif


#if (SOC_CORE_STANDBY)

.global _psci_core_entr_stdby
.global _psci_core_prep_stdby
.global _psci_core_exit_stdby

/*
 * void _psci_core_entr_stdby(u_register_t core_mask) - this
 * is the fast path for simple core standby
 */

func _psci_core_entr_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0		/* x5 = core mask */

	/* save scr_el3 */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* x4 = SCR_EL3
	 * x5 = core mask
	 */

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4
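
	/* with SCR_EL3.{IRQ,FIQ} set, physical interrupts are taken to EL3,
	 * so EL3 regains control when an interrupt wakes the core from the
	 * wfi executed in the SoC standby hook
	 */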

	/* x5 = core mask */

	/* put the core into standby */
	mov  x0, x5
	bl   _soc_core_entr_stdby

	/* restore scr_el3 */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_core_entr_stdby

/*
 * void _psci_core_prep_stdby(u_register_t core_mask) - this
 * sets up the core to enter the standby state through the normal path
 */

func _psci_core_prep_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0

	/* x5 = core mask */

	/* save scr_el3 */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* x5 = core mask */

	/* call for any SoC-specific programming */
	mov  x0, x5
	bl   _soc_core_prep_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_core_prep_stdby

/*
 * void _psci_core_exit_stdby(u_register_t core_mask) - this
 * exits the core from the standby state through the normal path
 */

func _psci_core_exit_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0

	/* x5 = core mask */

	/* restore scr_el3 */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask */

	/* perform any SoC-specific programming after the standby state */
	mov  x0, x5
	bl   _soc_core_exit_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_core_exit_stdby

#endif


#if (SOC_CORE_PWR_DWN)

.global _psci_core_prep_pwrdn
.global _psci_cpu_pwrdn_wfi
.global _psci_core_exit_pwrdn

/*
 * void _psci_core_prep_pwrdn(u_register_t core_mask)
 * this function prepares the core for power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs;
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */
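
/* note: this path routes IRQ/FIQ to EL3 and saves CPUECTLR but leaves
 * SMPEN set; the cluster and system variants below additionally drop
 * the core out of coherency
 */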

func _psci_core_prep_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0

	/* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr  DAIFSet, #0xF

	/* save scr_el3 */
	mov  x0, x6
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* save cpuectlr */
	mov  x0, x6
	mov  x1, #CPUECTLR_DATA
	mrs  x2, CPUECTLR_EL1
	bl   _setCoreData

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov  x0, x6
	bl   _soc_core_prep_pwrdn

	/* restore the non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_core_prep_pwrdn

/*
 * void _psci_cpu_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
 * this function powers down the core; it does not return to its caller -
 * when the core resumes, execution continues at resume_addr
 */

func _psci_cpu_pwrdn_wfi
	/* save the wakeup address */
	mov  x29, x1

	/* x0 = core mask */

	/* power down the core */
	bl   _soc_core_entr_pwrdn

	/* branch to resume execution */
	br   x29
endfunc _psci_cpu_pwrdn_wfi

/*
 * void _psci_core_exit_pwrdn(u_register_t core_mask)
 * this function cleans up after a core power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs;
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_core_exit_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* restore scr_el3 */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask */

	/* restore cpuectlr */
	mov  x0, x5
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData
	/* make sure smp is set */
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x0

	/* x5 = core mask */

	/* SoC-specific cleanup */
	mov  x0, x5
	bl   _soc_core_exit_pwrdn

	/* restore the non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_core_exit_pwrdn

#endif

#if (SOC_CLUSTER_STANDBY)

.global _psci_clstr_prep_stdby
.global _psci_clstr_exit_stdby

/*
 * void _psci_clstr_prep_stdby(u_register_t core_mask) - this
 * sets up the cluster to enter the standby state through the normal path
 */

func _psci_clstr_prep_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0

	/* x5 = core mask */

	/* save scr_el3 */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* x5 = core mask */

	/* call for any SoC-specific programming */
	mov  x0, x5
	bl   _soc_clstr_prep_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_clstr_prep_stdby

/*
 * void _psci_clstr_exit_stdby(u_register_t core_mask) - this
 * exits the cluster from the standby state through the normal path
 */

func _psci_clstr_exit_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* restore scr_el3 */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask */

	/* perform any SoC-specific programming after the standby state */
	mov  x0, x5
	bl   _soc_clstr_exit_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_clstr_exit_stdby

#endif

#if (SOC_CLUSTER_PWR_DWN)

.global _psci_clstr_prep_pwrdn
.global _psci_clstr_exit_pwrdn

/*
 * void _psci_clstr_prep_pwrdn(u_register_t core_mask)
 * this function prepares the cluster+core for power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs;
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_clstr_prep_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0			/* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr  DAIFSet, #0xF

	/* save scr_el3 */
	mov  x0, x6
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* save cpuectlr */
	mov  x0, x6
	mov  x1, #CPUECTLR_DATA
	mrs  x2, CPUECTLR_EL1
	mov  x4, x2
	bl   _setCoreData

	/* remove the core from coherency */
	bic  x4, x4, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x4

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov  x0, x6
	bl   _soc_clstr_prep_pwrdn

	/* restore the non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_clstr_prep_pwrdn

/*
 * void _psci_clstr_exit_pwrdn(u_register_t core_mask)
 * this function cleans up after a cluster power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs;
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_clstr_exit_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x4, x0			/* x4 = core mask */

	/* restore scr_el3 */
	mov  x0, x4
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x4 = core mask */

	/* restore cpuectlr */
	mov  x0, x4
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData
	/* make sure smp is set */
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x0

	/* x4 = core mask */

	/* SoC-specific cleanup */
	mov  x0, x4
	bl   _soc_clstr_exit_pwrdn

	/* restore the non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_clstr_exit_pwrdn

#endif

#if (SOC_SYSTEM_STANDBY)

.global _psci_sys_prep_stdby
.global _psci_sys_exit_stdby

/*
 * void _psci_sys_prep_stdby(u_register_t core_mask) - this
 * sets up the system to enter the standby state through the normal path
 */

func _psci_sys_prep_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* save scr_el3 */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* x5 = core mask */

	/* call for any SoC-specific programming */
	mov  x0, x5
	bl   _soc_sys_prep_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_sys_prep_stdby

/*
 * void _psci_sys_exit_stdby(u_register_t core_mask) - this
 * exits the system from the standby state through the normal path
 */

func _psci_sys_exit_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0

	/* x5 = core mask */

	/* restore scr_el3 */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask */

	/* perform any SoC-specific programming after the standby state */
	mov  x0, x5
	bl   _soc_sys_exit_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_sys_exit_stdby

#endif

#if (SOC_SYSTEM_PWR_DWN)

.global _psci_sys_prep_pwrdn
.global _psci_sys_pwrdn_wfi
.global _psci_sys_exit_pwrdn

/*
 * void _psci_sys_prep_pwrdn(u_register_t core_mask)
 * this function prepares the system+core for power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs;
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_sys_prep_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0			/* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr  DAIFSet, #0xF

	/* save scr_el3 */
	mov  x0, x6
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl   _setCoreData

	/* allow interrupts @ EL3 */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* save cpuectlr */
	mov  x0, x6
	mov  x1, #CPUECTLR_DATA
	mrs  x2, CPUECTLR_EL1
	mov  x4, x2
	bl   _setCoreData

	/* remove the core from coherency */
	bic  x4, x4, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x4

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov  x0, x6
	bl   _soc_sys_prep_pwrdn

	/* restore the non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_sys_prep_pwrdn


/*
 * void _psci_sys_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
 * this function powers down the system; it does not return to its caller -
 * when the system resumes, execution continues at resume_addr
 */
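
/* x29 is assumed to be preserved across _soc_sys_pwrdn_wfi and the
 * power-down/resume sequence - the resume branch below depends on it
 */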

func _psci_sys_pwrdn_wfi
	/* save the wakeup address */
	mov  x29, x1

	/* x0 = core mask */

	/* shut down the system */
	bl   _soc_sys_pwrdn_wfi

	/* branch to resume execution */
	br   x29
endfunc _psci_sys_pwrdn_wfi

/*
 * void _psci_sys_exit_pwrdn(u_register_t core_mask)
 * this function cleans up after a system power-down
 * x0 = core mask
 *
 * called from C, so save the non-volatile regs;
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_sys_exit_pwrdn

	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x4, x0			/* x4 = core mask */

	/* restore scr_el3 */
	mov  x0, x4
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData

	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x4 = core mask */

	/* restore cpuectlr */
	mov  x0, x4
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData

	/* make sure smp is set */
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x0

	/* x4 = core mask */

	/* SoC-specific cleanup */
	mov  x0, x4
	bl   _soc_sys_exit_pwrdn

	/* restore the non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_sys_exit_pwrdn

#endif


/* psci std returns */
func psci_disabled
	ldr  w0, =PSCI_E_DISABLED
	b    psci_completed
endfunc psci_disabled


func psci_not_present
	ldr  w0, =PSCI_E_NOT_PRESENT
	b    psci_completed
endfunc psci_not_present


func psci_on_pending
	ldr  w0, =PSCI_E_ON_PENDING
	b    psci_completed
endfunc psci_on_pending


func psci_already_on
	ldr  w0, =PSCI_E_ALREADY_ON
	b    psci_completed
endfunc psci_already_on


func psci_failure
	ldr  w0, =PSCI_E_INTERN_FAIL
	b    psci_completed
endfunc psci_failure


func psci_unimplemented
	ldr  w0, =PSCI_E_NOT_SUPPORTED
	b    psci_completed
endfunc psci_unimplemented


func psci_denied
	ldr  w0, =PSCI_E_DENIED
	b    psci_completed
endfunc psci_denied


func psci_invalid
	ldr  w0, =PSCI_E_INVALID_PARAMS
	b    psci_completed
endfunc psci_invalid


func psci_success
	mov  x0, #PSCI_E_SUCCESS
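	/* falls through into psci_completed */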
endfunc psci_success


func psci_completed
	/* x0 = status code */
	ret
endfunc psci_completed