/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * OMAP44xx sleep code.
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *	Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/smp_scu.h>
#include <asm/memory.h>
#include <asm/hardware/cache-l2x0.h>

#include "omap-secure.h"

#include "common.h"
#include "omap44xx.h"
#include "omap4-sar-layout.h"

	.arch armv7-a

#if defined(CONFIG_SMP) && defined(CONFIG_PM)

	.arch_extension sec
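
/*
 * DO_SMC: secure monitor call helper. Per the OMAP4 monitor/PPA
 * calling convention used throughout this file, r12 carries the
 * service index and r0-r3 carry the arguments. The DSBs on either
 * side ensure memory is consistent before entering and after
 * leaving the secure side.
 */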
.macro	DO_SMC
	dsb
	smc	#0
	dsb
.endm

#ifdef CONFIG_ARCH_OMAP4

/*
 * =============================
 * == CPU suspend finisher ==
 * =============================
 *
 * void omap4_finish_suspend(unsigned long cpu_state)
 *
 * This function saves the CPU context and performs the CPU
 * power down sequence. Calling WFI effectively changes the CPU
 * power domain state to the desired target power state.
 *
 * @cpu_state : contains context save state (r0)
 *	0 - No context lost
 *	1 - CPUx L1 and logic lost: MPUSS CSWR
 *	2 - CPUx L1 and logic lost + GIC lost: MPUSS OSWR
 *	3 - CPUx L1 and logic lost + GIC + L2 lost: MPUSS OFF
 * @return: This function never returns for the CPU OFF and DORMANT power
 * states. After WFI, the CPU transitions to the DORMANT or OFF power state,
 * and on wake-up it follows a full CPU reset path via ROM code to the CPU
 * restore code. The restore function pointer is stored at
 * CPUx_WAKEUP_NS_PA_ADDR_OFFSET. The function returns to the caller for the
 * CPU INACTIVE and ON power states, or if the CPU fails to transition to
 * the targeted OFF/DORMANT state.
 *
 * omap4_finish_suspend() calls v7_flush_dcache_all() which doesn't save
 * the stack frame and expects the caller to take care of it. Hence the
 * entire stack frame is saved to avoid possible stack corruption.
 */
ENTRY(omap4_finish_suspend)
	stmfd	sp!, {r4-r12, lr}
	cmp	r0, #0x0
	beq	do_WFI				@ No low-power state, jump to WFI

	/*
	 * Flush all data from the L1 data cache before disabling
	 * the SCTLR.C bit.
	 */
	bl	omap4_get_sar_ram_base
	ldr	r9, [r0, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	skip_secure_l1_clean
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0xFF			@ Clean secure L1
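	/*
	 * The secure monitor call is assumed to clobber r4-r12 and lr,
	 * hence the explicit save/restore around DO_SMC.
	 */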
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
skip_secure_l1_clean:
	bl	v7_flush_dcache_all

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C makes all data accesses
	 * strongly ordered, so they no longer hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)		@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	bl	v7_invalidate_l1

	/*
	 * Switch the CPU from Symmetric Multiprocessing (SMP) mode
	 * to Asymmetric Multiprocessing (AMP) mode by programming
	 * the SCU power status to DORMANT or OFF mode.
	 * This enables the CPU to be taken out of coherency by
	 * preventing the CPU from receiving cache, TLB, or BTB
	 * maintenance operations broadcast by other CPUs in the cluster.
	 */
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_set
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f			@ Extract CPU ID
	ldreq	r0, [r8, #SCU_OFFSET0]
	ldrne	r0, [r8, #SCU_OFFSET1]
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_set
scu_gp_set:
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f			@ Extract CPU ID
	ldreq	r1, [r8, #SCU_OFFSET0]
	ldrne	r1, [r8, #SCU_OFFSET1]
	bl	omap4_get_scu_base
	bl	scu_power_mode
skip_scu_gp_set:
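	/*
	 * NSACR bit 18 (NS_SMP) reports whether the non-secure world is
	 * permitted to modify the ACTLR.SMP bit; clear SMP directly only
	 * when that permission is granted.
	 */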
	mrc	p15, 0, r0, c1, c1, 2		@ Read NSACR data
	tst	r0, #(1 << 18)
	mrcne	p15, 0, r0, c1, c0, 1
	bicne	r0, r0, #(1 << 6)		@ Disable SMP bit
	mcrne	p15, 0, r0, c1, c0, 1
	isb
	dsb
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Clean and invalidate the L2 cache.
	 * The common cache-l2x0.c functions can't be used here since
	 * they take spinlocks, and at this point the CPU is out of
	 * coherency with its data cache disabled. The spinlock
	 * implementation uses exclusive load/store instructions, which
	 * can fail with the data cache disabled, and the OMAP4 hardware
	 * has no external exclusive monitor to make such accesses
	 * succeed. Taking the lock here could therefore deadlock the CPU.
	 */
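	/*
	 * L2X0_SAVE_OFFSET0/1 hold the target low-power state saved to
	 * SAR RAM for each CPU; only state 3 (MPUSS OFF, see @cpu_state
	 * above) loses the L2 content and requires the clean and
	 * invalidate below.
	 */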
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	mrc	p15, 0, r5, c0, c0, 5		@ Read MPIDR
	ands	r5, r5, #0x0f			@ Extract CPU ID
	ldreq	r0, [r8, #L2X0_SAVE_OFFSET0]	@ Retrieve L2 state from SAR
	ldrne	r0, [r8, #L2X0_SAVE_OFFSET1]	@ memory.
	cmp	r0, #3
	bne	do_WFI
#ifdef CONFIG_PL310_ERRATA_727915
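	/*
	 * PL310 errata 727915: a background clean and invalidate by way
	 * can lose dirty data. The workaround (per the PL310 errata
	 * documentation) writes the L2X0 debug control register through
	 * the secure monitor to disable write-back and linefills for the
	 * duration of the maintenance operation.
	 */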
	mov	r0, #0x03
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
	bl	omap4_get_l2cache_base
	mov	r2, r0
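	/*
	 * Clean and invalidate all 16 ways (way mask 0xffff). The PL310
	 * clears each way's bit as that way completes, so poll until the
	 * register reads zero.
	 */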
	ldr	r0, =0xffff
	str	r0, [r2, #L2X0_CLEAN_INV_WAY]
wait:
	ldr	r0, [r2, #L2X0_CLEAN_INV_WAY]
	ldr	r1, =0xffff
	ands	r0, r0, r1
	bne	wait
#ifdef CONFIG_PL310_ERRATA_727915
	mov	r0, #0x00
	mov	r12, #OMAP4_MON_L2X0_DBG_CTRL_INDEX
	DO_SMC
#endif
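	/*
	 * Drain the L2 store buffers: writing to CACHE_SYNC starts the
	 * sync operation and bit 0 reads back as 1 until it completes.
	 */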
l2x_sync:
	bl	omap4_get_l2cache_base
	mov	r2, r0
	mov	r0, #0x0
	str	r0, [r2, #L2X0_CACHE_SYNC]
sync:
	ldr	r0, [r2, #L2X0_CACHE_SYNC]
	ands	r0, r0, #0x1
	bne	sync
#endif

do_WFI:
	bl	omap_do_wfi

	/*
	 * The CPU reaches this point when it failed to enter OFF/DORMANT
	 * or when no low-power state was attempted.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	tst	r0, #(1 << 2)			@ Check C bit enabled?
	orreq	r0, r0, #(1 << 2)		@ Enable the C bit
	mcreq	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Ensure the CPU power state is set to NORMAL in the
	 * SCU power state so that the CPU is back in coherency.
	 * In non-coherent mode the CPU can lock up and deadlock
	 * the system.
	 */
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
	bl	omap4_get_sar_ram_base
	mov	r8, r0
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Check for HS device
	bne	scu_gp_clear
	mov	r0, #SCU_PM_NORMAL
	mov	r1, #0x00
	stmfd	r13!, {r4-r12, r14}
	ldr	r12, =OMAP4_MON_SCU_PWR_INDEX
	DO_SMC
	ldmfd	r13!, {r4-r12, r14}
	b	skip_scu_gp_clear
scu_gp_clear:
	bl	omap4_get_scu_base
	mov	r1, #SCU_PM_NORMAL
	bl	scu_power_mode
skip_scu_gp_clear:
	isb
	dsb
	ldmfd	sp!, {r4-r12, pc}
ENDPROC(omap4_finish_suspend)

/*
 * ============================
 * == CPU resume entry point ==
 * ============================
 *
 * void omap4_cpu_resume(void)
 *
 * ROM code jumps to this function while waking up from CPU
 * OFF or DORMANT state. The physical address of the function is
 * stored in the SAR RAM when entering OFF or DORMANT mode.
 * The restore function pointer is stored at CPUx_WAKEUP_NS_PA_ADDR_OFFSET.
 */
ENTRY(omap4_cpu_resume)
	/*
	 * Configure ACTLR and enable NS SMP bit access on CPU1 on HS device.
	 * OMAP44XX EMU/HS devices - CPU0 SMP bit access is enabled in PPA
	 * init and for CPU1 a secure PPA API is provided. CPU0 must be ON
	 * while executing the NS_SMP API on CPU1, and the PPA version must
	 * be 1.4.0+.
	 * OMAP443X GP devices - SMP bit isn't accessible.
	 * OMAP446X GP devices - SMP bit access is enabled on both CPUs.
	 */
	ldr	r8, =OMAP44XX_SAR_RAM_BASE
	ldr	r9, [r8, #OMAP_TYPE_OFFSET]
	cmp	r9, #0x1			@ Skip if GP device
	bne	skip_ns_smp_enable
	mrc	p15, 0, r0, c0, c0, 5		@ Read MPIDR
	ands	r0, r0, #0x0f			@ Extract CPU ID
	beq	skip_ns_smp_enable		@ CPU0: SMP bit set up in PPA init
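	/*
	 * This code may run from its physical address before the MMU is
	 * enabled, so the address of ppa_zero_params is computed
	 * PC-relatively from a link-time offset rather than taken as an
	 * absolute virtual address.
	 */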
ppa_actrl_retry:
	mov	r0, #OMAP4_PPA_CPU_ACTRL_SMP_INDEX
	adr	r1, ppa_zero_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1			@ Pointer to ppa_zero_params
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	cmp	r0, #0x0			@ API returns 0 on success.
	beq	enable_smp_bit
	b	ppa_actrl_retry
enable_smp_bit:
	mrc	p15, 0, r0, c1, c0, 1
	tst	r0, #(1 << 6)			@ Check SMP bit enabled?
	orreq	r0, r0, #(1 << 6)
	mcreq	p15, 0, r0, c1, c0, 1
	isb
skip_ns_smp_enable:
#ifdef CONFIG_CACHE_L2X0
	/*
	 * Restore the L2 AUXCTRL and enable the L2 cache.
	 * OMAP4_MON_L2X0_AUXCTRL_INDEX = Program the L2X0 AUXCTRL
	 * OMAP4_MON_L2X0_CTRL_INDEX = Enable the L2 using the L2X0 CTRL
	 * register; r0 contains the value to be programmed.
	 * The L2 cache is already invalidated by ROM code as part
	 * of the MPUSS OFF wakeup path.
	 */
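	/*
	 * The PL310 prefetch control settings saved in SAR RAM are
	 * restored first: HS devices go through the PPA L2 POR service,
	 * GP devices through the monitor prefetch index.
	 */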
	ldr	r2, =OMAP44XX_L2CACHE_BASE
	ldr	r0, [r2, #L2X0_CTRL]
	and	r0, #0x0f
	cmp	r0, #1
	beq	skip_l2en			@ Skip if already enabled
	ldr	r3, =OMAP44XX_SAR_RAM_BASE
	ldr	r1, [r3, #OMAP_TYPE_OFFSET]
	cmp	r1, #0x1			@ Check for HS device
	bne	set_gp_por
	ldr	r0, =OMAP4_PPA_L2_POR_INDEX
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r4, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	adr	r1, ppa_por_params_offset
	ldr	r3, [r1]
	add	r3, r3, r1			@ Pointer to ppa_por_params
	str	r4, [r3, #0x04]
	mov	r1, #0x0			@ Process ID
	mov	r2, #0x4			@ Flag
	mov	r6, #0xff
	mov	r12, #0x00			@ Secure Service ID
	DO_SMC
	b	set_aux_ctrl
set_gp_por:
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_PREFETCH_CTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_PREFETCH_INDEX	@ Setup L2 PREFETCH
	DO_SMC
set_aux_ctrl:
	ldr	r1, =OMAP44XX_SAR_RAM_BASE
	ldr	r0, [r1, #L2X0_AUXCTRL_OFFSET]
	ldr	r12, =OMAP4_MON_L2X0_AUXCTRL_INDEX	@ Setup L2 AUXCTRL
	DO_SMC
	mov	r0, #0x1
	ldr	r12, =OMAP4_MON_L2X0_CTRL_INDEX		@ Enable L2 cache
	DO_SMC
skip_l2en:
#endif

	b	cpu_resume			@ Jump to generic resume
ppa_por_params_offset:
	.long	ppa_por_params - .
ENDPROC(omap4_cpu_resume)
#endif	/* CONFIG_ARCH_OMAP4 */

#endif	/* defined(CONFIG_SMP) && defined(CONFIG_PM) */

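/*
 * ======================
 * == WFI entry helper ==
 * ======================
 *
 * void omap_do_wfi(void)
 *
 * Drain the interconnect write buffers when configured, execute the
 * barriers required before idling, and issue WFI. Control returns to
 * the caller on wake-up if the CPU did not complete the power
 * transition.
 */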
ENTRY(omap_do_wfi)
	stmfd	sp!, {lr}
#ifdef CONFIG_OMAP_INTERCONNECT_BARRIER
	/* Drain interconnect write buffers. */
	bl	omap_interconnect_sync
#endif

	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * by any CPU in the cluster have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in an idle, low-power state. The CPU can speculatively
	 * prefetch instructions, so add NOPs after the WFI. Sixteen
	 * NOPs as per the Cortex-A9 pipeline.
	 */
	wfi					@ Wait For Interrupt
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	ldmfd	sp!, {pc}
ppa_zero_params_offset:
	.long	ppa_zero_params - .
ENDPROC(omap_do_wfi)

	.data
	.align	2
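/*
 * Parameter buffers handed to the secure-side PPA services above.
 * The exact layout is defined by the PPA interface; the second word
 * of ppa_por_params receives the saved L2 prefetch control value
 * before the call.
 */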
ppa_zero_params:
	.word		0

ppa_por_params:
	.word		1, 0