xref: /OK3568_Linux_fs/kernel/arch/arm/mach-omap2/sleep33xx.S (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Low level suspend code for AM33XX SoCs
 *
 * Copyright (C) 2012-2018 Texas Instruments Incorporated - https://www.ti.com/
 *	Dave Gerlach, Vaibhav Bedia
 */

#include <linux/linkage.h>
#include <linux/platform_data/pm33xx.h>
#include <linux/ti-emif-sram.h>
#include <asm/assembler.h>
#include <asm/memory.h>

#include "iomap.h"
#include "cm33xx.h"
#include "pm-asm-offsets.h"

#define AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED			0x00030000
#define AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE			0x0003
#define AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE			0x0002

/* replicated define because linux/bitops.h cannot be included in assembly */
#define BIT(nr)			(1 << (nr))

	.arm
	.arch armv7-a
	.align 3

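/*
 * am33xx_do_wfi runs from on-chip SRAM (the pm33xx driver copies
 * am33xx_do_wfi_sz bytes starting here), so the code below must stay
 * position independent. On entry r0 holds the wfi_flags word built
 * from the WFI_FLAG_* bits.
 */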
ENTRY(am33xx_do_wfi)
	stmfd	sp!, {r4 - r11, lr}	@ save registers on stack

	/* Save wfi_flags arg to data space */
	mov	r4, r0
	adr	r3, am33xx_pm_ro_sram_data
	ldr	r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
	str	r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]

	/* Only flush cache if we know we are losing MPU context */
	tst	r4, #WFI_FLAG_FLUSH_CACHE
	beq	cache_skip_flush

	/*
	 * Flush all data from the L1 and L2 data cache before disabling
	 * SCTLR.C bit.
	 */
	ldr	r1, kernel_flush
	blx	r1

	/*
	 * Clear the SCTLR.C bit to prevent further data cache
	 * allocation. Clearing SCTLR.C would make all the data accesses
	 * strongly ordered and would not hit the cache.
	 */
	mrc	p15, 0, r0, c1, c0, 0
	bic	r0, r0, #(1 << 2)	@ Disable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

	/*
	 * Invalidate L1 and L2 data cache.
	 */
	ldr	r1, kernel_flush
	blx	r1

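	/*
	 * kernel_flush (v7_flush_dcache_all) clobbers r0-r7 and r9-r11,
	 * so recover wfi_flags into r4 from the SRAM data area instead
	 * of relying on the register surviving the calls above.
	 */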
	adr	r3, am33xx_pm_ro_sram_data
	ldr	r2, [r3, #AMX3_PM_RO_SRAM_DATA_VIRT_OFFSET]
	ldr	r4, [r2, #AMX3_PM_WFI_FLAGS_OFFSET]

cache_skip_flush:
	/* Check if we want self refresh */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_enter_sr

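	/*
	 * am33xx_emif_sram_table is filled in by the pm33xx driver with
	 * the entry points of the ti-emif-sram helpers that live in
	 * SRAM alongside this code (see the EMIF_PM_* offsets).
	 */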
	adr	r9, am33xx_emif_sram_table

	ldr	r3, [r9, #EMIF_PM_ENTER_SR_OFFSET]
	blx	r3

emif_skip_enter_sr:
	/* Only necessary if PER is losing context */
	tst	r4, #WFI_FLAG_SAVE_EMIF
	beq	emif_skip_save

	ldr	r3, [r9, #EMIF_PM_SAVE_CONTEXT_OFFSET]
	blx	r3

emif_skip_save:
	/* We can only disable the EMIF if we have entered self refresh */
	tst     r4, #WFI_FLAG_SELF_REFRESH
	beq     emif_skip_disable

	/* Disable EMIF */
	ldr     r1, virt_emif_clkctrl
	ldr     r2, [r1]
	bic     r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str     r2, [r1]

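	/*
	 * Poll CLKCTRL until the IDLEST field (bits 17:16) reads back
	 * as fully disabled (0x3) and no other bits remain set.
	 */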
	ldr	r1, virt_emif_clkctrl
wait_emif_disable:
	ldr	r2, [r1]
	mov	r3, #AM33XX_CM_CLKCTRL_MODULESTATE_DISABLED
	cmp	r2, r3
	bne	wait_emif_disable

emif_skip_disable:
	tst	r4, #WFI_FLAG_WAKE_M3
	beq	wkup_m3_skip

	/*
	 * For the MPU WFI to be registered as an interrupt
	 * to WKUP_M3, MPU_CLKCTRL.MODULEMODE needs to be set
	 * to DISABLED
	 */
	ldr	r1, virt_mpu_clkctrl
	ldr	r2, [r1]
	bic	r2, r2, #AM33XX_CM_CLKCTRL_MODULEMODE_DISABLE
	str	r2, [r1]

wkup_m3_skip:
	/*
	 * Execute an ISB instruction to ensure that all of the
	 * CP15 register changes have been committed.
	 */
	isb

	/*
	 * Execute a barrier instruction to ensure that all cache,
	 * TLB and branch predictor maintenance operations issued
	 * have completed.
	 */
	dsb
	dmb

	/*
	 * Execute a WFI instruction and wait until the
	 * STANDBYWFI output is asserted to indicate that the
	 * CPU is in idle and low power state. The CPU can
	 * speculatively prefetch instructions, so add NOPs
	 * after WFI. Thirteen NOPs as per the Cortex-A8 pipeline.
	 */
	wfi

	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* We come here in case of an abort due to a late interrupt */

	/* Set MPU_CLKCTRL.MODULEMODE back to ENABLE */
	ldr	r1, virt_mpu_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]

	/* Re-enable EMIF */
	ldr	r1, virt_emif_clkctrl
	mov	r2, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r2, [r1]
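	/*
	 * Spin until the register reads back exactly ENABLE, i.e. the
	 * IDLEST bits have cleared and the module is functional again.
	 */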
wait_emif_enable:
	ldr	r3, [r1]
	cmp	r2, r3
	bne	wait_emif_enable

	/* Only abort self refresh if we entered it above */
	tst	r4, #WFI_FLAG_SELF_REFRESH
	beq	emif_skip_exit_sr_abt

	adr	r9, am33xx_emif_sram_table
	ldr	r1, [r9, #EMIF_PM_ABORT_SR_OFFSET]
	blx	r1

emif_skip_exit_sr_abt:
	tst	r4, #WFI_FLAG_FLUSH_CACHE
	beq	cache_skip_restore

	/*
	 * Set SCTLR.C bit to allow data cache allocation
	 */
	mrc	p15, 0, r0, c1, c0, 0
	orr	r0, r0, #(1 << 2)	@ Enable the C bit
	mcr	p15, 0, r0, c1, c0, 0
	isb

cache_skip_restore:
	/* Let the suspend code know about the abort */
	mov	r0, #1
	ldmfd	sp!, {r4 - r11, pc}	@ restore regs and return
ENDPROC(am33xx_do_wfi)

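/*
 * Offset of the resume entry point below from the start of the SRAM
 * code; the pm33xx driver adds this to the physical address of the
 * SRAM copy to form the wakeup vector.
 */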
	.align
ENTRY(am33xx_resume_offset)
	.word . - am33xx_do_wfi

ENTRY(am33xx_resume_from_deep_sleep)
	/* Re-enable EMIF */
	ldr	r0, phys_emif_clkctrl
	mov	r1, #AM33XX_CM_CLKCTRL_MODULEMODE_ENABLE
	str	r1, [r0]
wait_emif_enable1:
	ldr	r2, [r0]
	cmp	r1, r2
	bne	wait_emif_enable1

	adr	r9, am33xx_emif_sram_table

	ldr	r1, [r9, #EMIF_PM_RESTORE_CONTEXT_OFFSET]
	blx	r1

	ldr	r1, [r9, #EMIF_PM_EXIT_SR_OFFSET]
	blx	r1

resume_to_ddr:
	/* We are back. Branch to the common CPU resume routine */
	mov	r0, #0
	ldr	pc, resume_addr
ENDPROC(am33xx_resume_from_deep_sleep)

/*
 * Local variables
 */
	.align
kernel_flush:
	.word   v7_flush_dcache_all
virt_mpu_clkctrl:
	.word	AM33XX_CM_MPU_MPU_CLKCTRL
virt_emif_clkctrl:
	.word	AM33XX_CM_PER_EMIF_CLKCTRL
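/*
 * Physical EMIF CLKCTRL address for the resume path, which runs
 * before the MMU is re-enabled and so cannot use the virtual mapping.
 */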
phys_emif_clkctrl:
	.word	(AM33XX_CM_BASE + AM33XX_CM_PER_MOD + \
		AM33XX_CM_PER_EMIF_CLKCTRL_OFFSET)

.align 3
/* DDR related defines */
am33xx_emif_sram_table:
	.space EMIF_PM_FUNCTIONS_SIZE

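/*
 * Addresses exported to the pm33xx driver; the entry order must match
 * struct am33xx_pm_sram_addr in <linux/platform_data/pm33xx.h>.
 */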
ENTRY(am33xx_pm_sram)
	.word am33xx_do_wfi
	.word am33xx_do_wfi_sz
	.word am33xx_resume_offset
	.word am33xx_emif_sram_table
	.word am33xx_pm_ro_sram_data

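/*
 * Physical address of cpu_resume: rebase the kernel virtual address
 * from PAGE_OFFSET to the AM33xx DDR physical base at 0x80000000,
 * since the jump happens while the MMU is still off.
 */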
resume_addr:
.word  cpu_resume - PAGE_OFFSET + 0x80000000

.align 3
ENTRY(am33xx_pm_ro_sram_data)
	.space AMX3_PM_RO_SRAM_DATA_SIZE

ENTRY(am33xx_do_wfi_sz)
	.word	. - am33xx_do_wfi