/* xref: /rk3399_ARM-atf/plat/nxp/common/psci/aarch64/psci_utils.S (revision 772a328130c522182ecbfe8c827740988a472993) */
1
2/*
3 * Copyright 2018-2021 NXP
4 *
5 * SPDX-License-Identifier: BSD-3-Clause
6 *
7 */
8
9#include <asm_macros.S>
10#include <assert_macros.S>
11
12#include <lib/psci/psci.h>
13
14#include <cortex_a53.h>
15#include <cortex_a72.h>
16#include <bl31_data.h>
17#include <plat_psci.h>
18
19
#define RESET_RETRY_CNT   800	/* reset-release polling budget (not referenced in this file) */
#define PSCI_ABORT_CNT	100	/* iterations to poll for a CPU_OFF abort to take effect */
22
23#if (SOC_CORE_RELEASE)
24
25.global _psci_cpu_on
26
/*
 * int _psci_cpu_on(u_register_t core_mask)
 * in:  x0 = target cpu core mask (lsb)
 * out: x0 = PSCI_E_SUCCESS | PSCI_E_DISABLED | PSCI_E_ON_PENDING |
 *           PSCI_E_ALREADY_ON | PSCI_E_NOT_SUPPORTED
 *
 * Powers on (or re-activates) the core selected by core_mask,
 * dispatching on the core state recorded in the bl31 data area.
 *
 * Called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_cpu_on
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0

	/* x0   = core mask (lsb)
	 * x6   = core mask (lsb)
	 */

	/* check if core disabled */
	bl   _soc_ck_disabled		/* 0-2 */
	cbnz w0, psci_disabled

	/* check core data area to see if core cannot be turned on
	 * read the core state
	 */
	mov  x0, x6
	bl   _getCoreState		/* 0-5 */
	mov  x9, x0

	/* x6   = core mask (lsb)
	 * x9   = core state (from data area)
	 */

	cmp  x9, #CORE_DISABLED
	mov  x0, #PSCI_E_DISABLED
	b.eq cpu_on_done

	cmp  x9, #CORE_PENDING
	mov  x0, #PSCI_E_ON_PENDING
	b.eq cpu_on_done

	cmp  x9, #CORE_RELEASED
	mov  x0, #PSCI_E_ALREADY_ON
	b.eq cpu_on_done

8:
	/* x6   = core mask (lsb)
	 * x9   = core state (from data area)
	 */

	cmp  x9, #CORE_WFE
	b.eq core_in_wfe
	cmp  x9, #CORE_IN_RESET
	b.eq core_in_reset
	cmp  x9, #CORE_OFF
	b.eq core_is_off

	/* Fall-through: every other state was dispatched above, so the
	 * core must be in CORE_OFF_PENDING - request an abort of the
	 * in-flight CPU_OFF.  (A vestigial "cmp x9, #CORE_OFF_PENDING",
	 * whose flags were never consumed, has been removed here.)
	 */
	mov  x0, x6
	mov  x1, #ABORT_FLAG_DATA
	mov  x2, #CORE_ABORT_OP
	bl   _setCoreData		/* 0-3, [13-15] */

	ldr  x3, =PSCI_ABORT_CNT
7:
	/* watch for abort to take effect */
	mov  x0, x6
	bl   _getCoreState		/* 0-5 */
	cmp  x0, #CORE_OFF
	b.eq core_is_off
	cmp  x0, #CORE_PENDING
	mov  x0, #PSCI_E_SUCCESS
	b.eq cpu_on_done

	/* loop til finished */
	sub  x3, x3, #1
	cbnz x3, 7b

	/* if we didn't see either CORE_OFF or CORE_PENDING, then this
	 * core is in CORE_OFF_PENDING - exit with success, as the core will
	 * respond to the abort request
	 */
	mov  x0, #PSCI_E_SUCCESS
	b    cpu_on_done

/* this is where we start up a core out of reset */
core_in_reset:
	/* see if the soc-specific module supports this op */
	ldr  x7, =SOC_CORE_RELEASE
	cbnz  x7, 3f

	mov  x0, #PSCI_E_NOT_SUPPORTED
	b    cpu_on_done

	/* x6   = core mask (lsb) */
3:
	/* set core state in data area */
	mov  x0, x6
	mov  x1, #CORE_PENDING
	bl   _setCoreState   			/* 0-3, [13-15] */

	/* release the core from reset */
	mov   x0, x6
	bl    _soc_core_release 		/* 0-3 */
	mov   x0, #PSCI_E_SUCCESS
	b     cpu_on_done

	/* Start up the core that has been powered-down via CPU_OFF
	 */
core_is_off:
	/* see if the soc-specific module supports this op
	 */
	ldr  x7, =SOC_CORE_RESTART
	cbnz x7, 2f

	mov  x0, #PSCI_E_NOT_SUPPORTED
	b    cpu_on_done

	/* x6   = core mask (lsb) */
2:
	/* set core state in data area */
	mov  x0, x6
	mov  x1, #CORE_WAKEUP
	bl   _setCoreState			/* 0-3, [13-15] */

	/* put the core back into service */
	mov  x0, x6
#if (SOC_CORE_RESTART)
	bl   _soc_core_restart			/* 0-5 */
#endif
	mov  x0, #PSCI_E_SUCCESS
	b    cpu_on_done

/* this is where we release a core that is being held in wfe */
core_in_wfe:
	/* x6   = core mask (lsb) */

	/* set core state in data area */
	mov  x0, x6
	mov  x1, #CORE_PENDING
	bl   _setCoreState			/* 0-3, [13-15] */
	dsb  sy
	isb

	/* wake the pen-holder; double sev guards against a missed event */
	sev
	sev
	isb
	mov  x0, #PSCI_E_SUCCESS

cpu_on_done:
	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_cpu_on
198
199#endif
200
201
202#if (SOC_CORE_OFF)
203
204.global _psci_cpu_prep_off
205.global _psci_cpu_off_wfi
206
/*
 * void _psci_cpu_prep_off(u_register_t core_mask)
 * this function performs the SoC-specific programming prior
 * to shutting the core down
 * x0 = core_mask
 *
 * Saves CPUECTLR and SCR_EL3 in the per-core data area, removes the
 * core from coherency, and routes the secure SGI (FIQ) to EL3 before
 * handing off to _soc_core_prep_off.
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_cpu_prep_off

	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x10, x0			/* x10 = core_mask */

	/* NOTE(review): an earlier comment here claimed there is "no need
	 * to save/restore non-volatile registers" because the core does
	 * not return from cpu_off - yet this function does save and
	 * restore them (above/below) and does return to its C caller;
	 * the actual shutdown happens later in _psci_cpu_off_wfi.
	 */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr DAIFSet, #0xF

	/* read cpuectlr and save current value in the core data area */
	mrs   x4, CPUECTLR_EL1
	mov   x1, #CPUECTLR_DATA
	mov   x2, x4
	mov   x0, x10
	bl    _setCoreData

	/* remove the core from coherency (clear SMPEN) */
	bic   x4, x4, #CPUECTLR_SMPEN_MASK
	msr   CPUECTLR_EL1, x4

	/* save scr_el3 in the core data area */
	mov  x0, x10
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* x4 = scr_el3 */

	/* secure SGI (FIQ) taken to EL3, set SCR_EL3[FIQ] */
	orr   x4, x4, #SCR_FIQ_MASK
	msr   scr_el3, x4

	/* x10 = core_mask */

	/* prep the core for shutdown */
	mov  x0, x10
	bl   _soc_core_prep_off

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_cpu_prep_off
279
/*
 * void _psci_cpu_off_wfi(u_register_t core_mask, u_register_t resume_addr)
 *   - this function shuts down the core
 *   - this function does not return through the normal call path:
 *     if the power-down is aborted or the core wakes, execution
 *     continues at resume_addr
 * x0 = core_mask
 * x1 = resume_addr (address to branch to on wakeup)
 *
 * x29 (normally the frame pointer) is used as scratch to hold the
 * resume address across _soc_core_entr_off; no frame chain is
 * maintained on this path.
 */

func _psci_cpu_off_wfi
	/* save the wakeup address */
	mov  x29, x1

	/* x0 = core_mask */

	/* shutdown the core (returns here only on wakeup/abort) */
	bl   _soc_core_entr_off

	/* branch to resume execution */
	br   x29
endfunc _psci_cpu_off_wfi
298
299#endif
300
301
302#if (SOC_CORE_RESTART)
303
304.global _psci_wakeup
305
/*
 * void _psci_wakeup(u_register_t core_mask)
 * this function performs the SoC-specific programming
 * after a core wakes up from OFF
 * x0 = core mask
 *
 * Restores the SCR_EL3 and CPUECTLR values saved by
 * _psci_cpu_prep_off (forcing SMPEN back on so the core rejoins
 * coherency), then calls _soc_core_exit_off.
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_wakeup

	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x4, x0			/* x4 = core mask */

	/* restore scr_el3 from the core data area */
	mov  x0, x4
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x4 = core mask */

	/* restore CPUECTLR, forcing SMPEN on (rejoin coherency) */
	mov   x0, x4
	mov   x1, #CPUECTLR_DATA
	bl    _getCoreData
	orr   x0, x0, #CPUECTLR_SMPEN_MASK
	msr   CPUECTLR_EL1, x0

	/* x4 = core mask */

	/* start the core back up */
	mov   x0, x4
	bl   _soc_core_exit_off

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_wakeup
364
365#endif
366
367
368#if (SOC_SYSTEM_RESET)
369
370.global _psci_system_reset
371
/*
 * void _psci_system_reset(void)
 * Resets the whole system via the SoC-specific hook.  Never returns.
 */
func _psci_system_reset

	/* system reset is mandatory
	 * system reset is soc-specific
	 * Note: under no circumstances do we return from this call
	 *
	 * Tail-branch ("b", not "bl") for consistency with
	 * _psci_system_off: there is no return path, and a plain branch
	 * cannot fall off the end of this function if the SoC hook ever
	 * did return.
	 */
	b    _soc_sys_reset
endfunc _psci_system_reset
380
381#endif
382
383
384#if (SOC_SYSTEM_OFF)
385
386.global _psci_system_off
387
/*
 * void _psci_system_off(void)
 * Powers the whole system off via the SoC-specific hook.  Never returns.
 */
func _psci_system_off

	/* system off is mandatory
	 * system off is soc-specific
	 * Note: under no circumstances do we return from this call
	 * (tail branch - x30 is irrelevant past this point) */
	b    _soc_sys_off
endfunc _psci_system_off
395
396#endif
397
398
399#if (SOC_CORE_STANDBY)
400
401.global _psci_core_entr_stdby
402.global _psci_core_prep_stdby
403.global _psci_core_exit_stdby
404
/*
 * void _psci_core_entr_stdby(u_register_t core_mask) - this
 * is the fast-path for simple core standby
 *
 * Saves SCR_EL3 in the core data area, routes IRQ/FIQ to EL3 so a
 * pending interrupt can wake the core, enters standby via the SoC
 * hook, then restores the original SCR_EL3 on exit.
 */

func _psci_core_entr_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0		/* x5 = core mask */

	/* save scr_el3 in the core data area */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* x4 = SCR_EL3
	 * x5 = core mask
	 */

	/* allow interrupts @ EL3 (route IRQ and FIQ to EL3) */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* x5 = core mask */

	/* put the core into standby (returns on wakeup) */
	mov  x0, x5
	bl   _soc_core_entr_stdby

	/* restore scr_el3 */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_core_entr_stdby
449
/*
 * void _psci_core_prep_stdby(u_register_t core_mask)
 * Normal-path preparation for core standby: stashes SCR_EL3 in the
 * core data area, routes IRQ/FIQ to EL3 so an interrupt can wake the
 * core, then hands off to the SoC-specific hook.
 */

func _psci_core_prep_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* stash the live SCR_EL3 value in the core data area */
	mrs  x4, SCR_EL3
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	mov  x2, x4
	bl   _setCoreData

	/* open IRQ/FIQ routing to EL3 for the wakeup interrupt */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* SoC-specific standby preparation */
	mov  x0, x5
	bl   _soc_core_prep_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_core_prep_stdby
485
/*
 * void _psci_core_exit_stdby(u_register_t core_mask)
 * Normal-path cleanup after core standby: reinstates the SCR_EL3
 * value stashed by _psci_core_prep_stdby, then runs any SoC-specific
 * post-standby programming.
 */

func _psci_core_exit_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* fetch the saved SCR_EL3 from the core data area and restore it */
	mov  x1, #SCR_EL3_DATA
	mov  x0, x5
	bl   _getCoreData
	msr  SCR_EL3, x0		/* x0 = saved scr_el3 */

	/* SoC-specific post-standby programming */
	mov  x0, x5
	bl   _soc_core_exit_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_core_exit_stdby
517
518#endif
519
520
521#if (SOC_CORE_PWR_DWN)
522
523.global _psci_core_prep_pwrdn
524.global _psci_cpu_pwrdn_wfi
525.global _psci_core_exit_pwrdn
526
/*
 * void _psci_core_prep_pwrdn_(u_register_t core_mask)
 * this function prepares the core for power-down
 * x0 = core mask
 *
 * Saves SCR_EL3 and CPUECTLR in the core data area and routes
 * IRQ/FIQ to EL3 for the wakeup interrupt before the SoC-specific
 * preparation.  Unlike _psci_clstr_prep_pwrdn, SMPEN is left set here.
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_core_prep_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0

	/* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr DAIFSet, #0xF

	/* save scr_el3 in the core data area */
	mov  x0, x6
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* allow interrupts @ EL3 (routing only; PSTATE.DAIF set above) */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* save cpuectlr in the core data area */
	mov  x0, x6
	mov  x1, #CPUECTLR_DATA
	mrs  x2, CPUECTLR_EL1
	bl   _setCoreData

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov  x0, x6
	bl  _soc_core_prep_pwrdn

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_core_prep_pwrdn
589
/*
 * void _psci_cpu_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
 * this function powers down the core
 * x0 = core mask
 * x1 = resume_addr (address to branch to on wakeup)
 *
 * x29 is used as scratch to hold the resume address across
 * _soc_core_entr_pwrdn; no frame chain is maintained on this path.
 */

func _psci_cpu_pwrdn_wfi
	/* save the wakeup address */
	mov  x29, x1

	/* x0 = core mask */

	/* shutdown the core (returns here only on wakeup) */
	bl   _soc_core_entr_pwrdn

	/* branch to resume execution */
	br   x29
endfunc _psci_cpu_pwrdn_wfi
607
/*
 * void _psci_core_exit_pwrdn_(u_register_t core_mask)
 * this function cleans up after a core power-down
 * x0 = core mask
 *
 * Restores the SCR_EL3 and CPUECTLR values saved by
 * _psci_core_prep_pwrdn (forcing SMPEN back on), then runs the
 * SoC-specific cleanup.
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_core_exit_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* restore scr_el3 from the core data area */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask */

	/* restore cpuectlr */
	mov  x0, x5
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData
	/* make sure smp is set (core back in coherency) */
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x0

	/* x5 = core mask */

	/* SoC-specific cleanup */
	mov  x0, x5
	bl   _soc_core_exit_pwrdn

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_core_exit_pwrdn
665
666#endif
667
668#if (SOC_CLUSTER_STANDBY)
669
670.global _psci_clstr_prep_stdby
671.global _psci_clstr_exit_stdby
672
/*
 * void _psci_clstr_prep_stdby(u_register_t core_mask) - this
 * sets up the clstr to enter standby state thru the normal path
 *
 * Same shape as _psci_core_prep_stdby: save SCR_EL3, route IRQ/FIQ
 * to EL3 for wakeup, then run the cluster-level SoC hook.
 */

func _psci_clstr_prep_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0

	/* x5 = core mask */

	/* save scr_el3 in the core data area */
	mov  x0, x5
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* allow interrupts @ EL3 (route IRQ and FIQ to EL3) */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* x5 = core mask */

	/* call for any SoC-specific programming */
	mov  x0, x5
	bl   _soc_clstr_prep_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_clstr_prep_stdby
708
/*
 * void _psci_clstr_exit_stdby(u_register_t core_mask) - this
 * exits the clstr from standby state thru the normal path
 *
 * Restores the SCR_EL3 value saved by _psci_clstr_prep_stdby, then
 * runs the cluster-level SoC cleanup hook.
 */

func _psci_clstr_exit_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* restore scr_el3 from the core data area */
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x5 = core mask */

	/* perform any SoC-specific programming after standby state */
	mov  x0, x5
	bl   _soc_clstr_exit_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_clstr_exit_stdby
738
739#endif
740
741#if (SOC_CLUSTER_PWR_DWN)
742
743.global _psci_clstr_prep_pwrdn
744.global _psci_clstr_exit_pwrdn
745
/*
 * void _psci_clstr_prep_pwrdn_(u_register_t core_mask)
 * this function prepares the cluster+core for power-down
 * x0 = core mask
 *
 * Saves SCR_EL3 and CPUECTLR, routes IRQ/FIQ to EL3 for wakeup, and
 * (unlike _psci_core_prep_pwrdn) removes the core from coherency by
 * clearing SMPEN, before running the cluster-level SoC hook.
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_clstr_prep_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0			/* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr DAIFSet, #0xF

	/* save scr_el3 in the core data area */
	mov  x0, x6
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* allow interrupts @ EL3 (routing only; PSTATE.DAIF set above) */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* save cpuectlr; keep a copy in x4 for the SMPEN clear below */
	mov  x0, x6
	mov  x1, #CPUECTLR_DATA
	mrs  x2, CPUECTLR_EL1
	mov  x4, x2
	bl   _setCoreData

	/* remove core from coherency (clear SMPEN) */
	bic   x4, x4, #CPUECTLR_SMPEN_MASK
	msr   CPUECTLR_EL1, x4

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov  x0, x6
	bl  _soc_clstr_prep_pwrdn

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_clstr_prep_pwrdn
811
/*
 * void _psci_clstr_exit_pwrdn_(u_register_t core_mask)
 * this function cleans up after a cluster power-down
 * x0 = core mask
 *
 * Restores the SCR_EL3 and CPUECTLR values saved by
 * _psci_clstr_prep_pwrdn (forcing SMPEN back on), then runs the
 * cluster-level SoC cleanup.
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_clstr_exit_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x4, x0			/* x4 = core mask */

	/* restore scr_el3 from the core data area */
	mov  x0, x4
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData
	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x4 = core mask */

	/* restore cpuectlr */
	mov  x0, x4
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData
	/* make sure smp is set (core back in coherency) */
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x0

	/* x4 = core mask */

	/* SoC-specific cleanup */
	mov  x0, x4
	bl   _soc_clstr_exit_pwrdn

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_clstr_exit_pwrdn
869
870#endif
871
872#if (SOC_SYSTEM_STANDBY)
873
874.global _psci_sys_prep_stdby
875.global _psci_sys_exit_stdby
876
/*
 * void _psci_sys_prep_stdby(u_register_t core_mask)
 * Normal-path preparation for system standby: stashes SCR_EL3 in the
 * core data area, routes IRQ/FIQ to EL3 so an interrupt can wake the
 * system, then hands off to the SoC-specific hook.
 */

func _psci_sys_prep_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* stash the live SCR_EL3 value in the core data area */
	mrs  x4, SCR_EL3
	mov  x0, x5
	mov  x1, #SCR_EL3_DATA
	mov  x2, x4
	bl   _setCoreData

	/* open IRQ/FIQ routing to EL3 for the wakeup interrupt */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* SoC-specific standby preparation */
	mov  x0, x5
	bl   _soc_sys_prep_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_sys_prep_stdby
910
/*
 * void _psci_sys_exit_stdby(u_register_t core_mask)
 * Normal-path cleanup after system standby: reinstates the SCR_EL3
 * value stashed by _psci_sys_prep_stdby, then runs any SoC-specific
 * post-standby programming.
 */

func _psci_sys_exit_stdby
	stp  x4,  x5, [sp, #-16]!
	stp  x6, x30, [sp, #-16]!

	mov  x5, x0			/* x5 = core mask */

	/* fetch the saved SCR_EL3 from the core data area and restore it */
	mov  x1, #SCR_EL3_DATA
	mov  x0, x5
	bl   _getCoreData
	msr  SCR_EL3, x0		/* x0 = saved scr_el3 */

	/* SoC-specific post-standby programming */
	mov  x0, x5
	bl   _soc_sys_exit_stdby

	ldp  x6,  x30, [sp], #16
	ldp  x4,  x5,  [sp], #16
	isb
	ret
endfunc _psci_sys_exit_stdby
942
943#endif
944
945#if (SOC_SYSTEM_PWR_DWN)
946
947.global _psci_sys_prep_pwrdn
948.global _psci_sys_pwrdn_wfi
949.global _psci_sys_exit_pwrdn
950
/*
 * void _psci_sys_prep_pwrdn_(u_register_t core_mask)
 * this function prepares the system+core for power-down
 * x0 = core mask
 *
 * Saves SCR_EL3 and CPUECTLR, routes IRQ/FIQ to EL3 for wakeup, and
 * removes the core from coherency (clears SMPEN) before running the
 * system-level SoC hook.  Same shape as _psci_clstr_prep_pwrdn.
 *
 * called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_sys_prep_pwrdn
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x6, x0			/* x6 = core mask */

	/* mask interrupts by setting DAIF[7:4] to 'b1111 */
	msr DAIFSet, #0xF

	/* save scr_el3 in the core data area */
	mov  x0, x6
	mrs  x4, SCR_EL3
	mov  x2, x4
	mov  x1, #SCR_EL3_DATA
	bl    _setCoreData

	/* allow interrupts @ EL3 (routing only; PSTATE.DAIF set above) */
	orr  x4, x4, #(SCR_IRQ_MASK | SCR_FIQ_MASK)
	msr  SCR_EL3, x4

	/* save cpuectlr; keep a copy in x4 for the SMPEN clear below */
	mov  x0, x6
	mov  x1, #CPUECTLR_DATA
	mrs  x2, CPUECTLR_EL1
	mov  x4, x2
	bl   _setCoreData

	/* remove core from coherency (clear SMPEN) */
	bic   x4, x4, #CPUECTLR_SMPEN_MASK
	msr   CPUECTLR_EL1, x4

	/* x6 = core mask */

	/* SoC-specific programming for power-down */
	mov  x0, x6
	bl  _soc_sys_prep_pwrdn

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_sys_prep_pwrdn
1016
1017
/*
 * void _psci_sys_pwrdn_wfi(u_register_t core_mask, u_register_t resume_addr)
 * this function powers down the system
 * x0 = core mask
 * x1 = resume_addr (address to branch to on wakeup)
 *
 * x29 is used as scratch to hold the resume address across
 * _soc_sys_pwrdn_wfi; no frame chain is maintained on this path.
 */

func _psci_sys_pwrdn_wfi
	/* save the wakeup address */
	mov  x29, x1

	/* x0 = core mask */

	/* shutdown the system (returns here only on wakeup) */
	bl   _soc_sys_pwrdn_wfi

	/* branch to resume execution */
	br   x29
endfunc _psci_sys_pwrdn_wfi
1035
/*
 * void _psci_sys_exit_pwrdn_(u_register_t core_mask)
 * this function cleans up after a system power-down
 * x0 = core mask
 *
 * Restores the SCR_EL3 and CPUECTLR values saved by
 * _psci_sys_prep_pwrdn (forcing SMPEN back on), then runs the
 * system-level SoC cleanup.
 *
 * Called from C, so save the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 */

func _psci_sys_exit_pwrdn

	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x14, x15, [sp, #-16]!
	stp  x16, x17, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	mov  x4, x0			/* x4 = core mask */

	/* restore scr_el3 from the core data area */
	mov  x0, x4
	mov  x1, #SCR_EL3_DATA
	bl   _getCoreData

	/* x0 = saved scr_el3 */
	msr  SCR_EL3, x0

	/* x4 = core mask */

	/* restore cpuectlr */
	mov  x0, x4
	mov  x1, #CPUECTLR_DATA
	bl   _getCoreData

	/* make sure smp is set (core back in coherency) */
	orr  x0, x0, #CPUECTLR_SMPEN_MASK
	msr  CPUECTLR_EL1, x0

	/* x4 = core mask */

	/* SoC-specific cleanup */
	mov  x0, x4
	bl   _soc_sys_exit_pwrdn

	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x16, x17, [sp], #16
	ldp  x14, x15, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	b    psci_completed
endfunc _psci_sys_exit_pwrdn
1096
1097#endif
1098
1099
1100/* psci std returns */
/* standard exit: return PSCI_E_DISABLED in w0 */
func psci_disabled
	mov  w0, #PSCI_E_DISABLED	/* wide-immediate move; no literal pool entry */
	b    psci_completed
endfunc psci_disabled
1105
1106
/* standard exit: return PSCI_E_NOT_PRESENT in w0 */
func psci_not_present
	mov  w0, #PSCI_E_NOT_PRESENT	/* wide-immediate move; no literal pool entry */
	b    psci_completed
endfunc psci_not_present
1111
1112
/* standard exit: return PSCI_E_ON_PENDING in w0 */
func psci_on_pending
	mov  w0, #PSCI_E_ON_PENDING	/* wide-immediate move; no literal pool entry */
	b    psci_completed
endfunc psci_on_pending
1117
1118
/* standard exit: return PSCI_E_ALREADY_ON in w0 */
func psci_already_on
	mov  w0, #PSCI_E_ALREADY_ON	/* wide-immediate move; no literal pool entry */
	b    psci_completed
endfunc psci_already_on
1123
1124
/* standard exit: return PSCI_E_INTERN_FAIL in w0 */
func psci_failure
	mov  w0, #PSCI_E_INTERN_FAIL	/* wide-immediate move; no literal pool entry */
	b    psci_completed
endfunc psci_failure
1129
1130
/* standard exit: return PSCI_E_NOT_SUPPORTED in w0 */
func psci_unimplemented
	mov  w0, #PSCI_E_NOT_SUPPORTED	/* wide-immediate move; no literal pool entry */
	b    psci_completed
endfunc psci_unimplemented
1135
1136
/* standard exit: return PSCI_E_DENIED in w0 */
func psci_denied
	mov  w0, #PSCI_E_DENIED		/* wide-immediate move; no literal pool entry */
	b    psci_completed
endfunc psci_denied
1141
1142
/* standard exit: return PSCI_E_INVALID_PARAMS in w0 */
func psci_invalid
	mov  w0, #PSCI_E_INVALID_PARAMS	/* wide-immediate move; no literal pool entry */
	b    psci_completed
endfunc psci_invalid
1147
1148
/* standard exit: return PSCI_E_SUCCESS in x0 */
func psci_success
	mov  x0, #PSCI_E_SUCCESS
	/* Branch explicitly instead of falling through into
	 * psci_completed: the "func" macro may place each function in
	 * its own section, so adjacency is not guaranteed at link time.
	 */
	b    psci_completed
endfunc psci_success
1152
1153
/* common return point for all the psci_* exits above */
func psci_completed
	/* x0 = status code, already set by the caller/branch source */
	ret
endfunc psci_completed
1158