/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low-level cache management functions
 * used for sleep and CPU speed changes on Apple machines.
 * (In fact the only thing that is Apple-specific is that we assume
 * that we can read from ROM at physical address 0xfff00000.)
 *
 *    Copyright (C) 2004 Paul Mackerras (paulus@samba.org) and
 *                       Benjamin Herrenschmidt (benh@kernel.crashing.org)
 */

#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/cputable.h>
#include <asm/feature-fixups.h>

/*
 * Flush and disable all data caches (dL1, L2, L3). This is used
 * when going to sleep, when doing a PMU based cpufreq transition,
 * or when "offlining" a CPU on SMP machines. This code is
 * over-paranoid, but I've had enough issues with various CPU revs
 * and bugs that I decided it was worth being over-cautious.
 */

_GLOBAL(flush_disable_caches)
#ifndef CONFIG_PPC_BOOK3S_32
	blr
#else
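/*
 * Pick the variant for this CPU: 745x family first, then the
 * 750/7400/7410 style (anything with L2CR), otherwise just flush
 * and disable the L1.
 */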
BEGIN_FTR_SECTION
	b	flush_disable_745x
END_FTR_SECTION_IFSET(CPU_FTR_SPEC7450)
BEGIN_FTR_SECTION
	b	flush_disable_75x
END_FTR_SECTION_IFSET(CPU_FTR_L2CR)
	b	__flush_disable_L1

/* This is the code for G3 and 74[01]0 */
flush_disable_75x:
	mflr	r10

	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop DST streams */
BEGIN_FTR_SECTION
	PPC_DSSALL
	sync
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)

	/* Stop DPM (dynamic power management) for the duration of the
	 * flush; HID0 is saved in r8 and DPM is restored at the end.
	 */
	mfspr	r8,SPRN_HID0		/* Save SPRN_HID0 in r8 */
	rlwinm	r4,r8,0,12,10		/* Turn off HID0[DPM] */
	sync
	mtspr	SPRN_HID0,r4		/* Disable DPM */
	sync

	/* Disp-flush L1. We have a weird problem here that I never
	 * totally figured out. On 750FX, using the ROM for the flush
	 * results in a non-working flush. We use that workaround for
	 * now until I finally understand what's going on. --BenH
	 */

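	/* Displacement flush: read 0x4000 cache lines, 32 bytes apart
	 * (512KB of address space, far more than the L1 holds), so that
	 * every dirty L1 data line is cast out to memory.
	 */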
	/* ROM base by default */
	lis	r4,0xfff0
	mfpvr	r3
	srwi	r3,r3,16
	cmplwi	cr0,r3,0x7000
	bne+	1f
	/* RAM base on 750FX */
	li	r4,0
1:	li	r4,0x4000
	mtctr	r4
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* Disable both L1 caches, flash-invalidate them (DCI/ICFI), then
	 * re-enable them, now empty.
	 */
	mfspr	r3,SPRN_HID0
	rlwinm	r3,r3,0,~(HID0_DCE | HID0_ICE)
	mtspr	SPRN_HID0,r3
	sync
	isync
	ori	r3,r3,(HID0_DCE|HID0_DCI|HID0_ICE|HID0_ICFI)
	sync
	isync
	mtspr	SPRN_HID0,r3
	xori	r3,r3,(HID0_DCI|HID0_ICFI)
	mtspr	SPRN_HID0,r3
	sync

	/* Save the current L2CR value in r5 */
	mfspr	r5,SPRN_L2CR
	/* Set to data-only (pre-745x bit) */
	oris	r3,r5,L2CR_L2DO@h
	b	2f
	/* When disabling L2, code must be in L1 */
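	/* The branch dance below runs through this 32-byte aligned block
	 * once before the mtspr executes, so its instructions are already
	 * in the L1 I-cache and no instruction fetch needs the L2 while
	 * L2CR is being changed.
	 */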
	.balign 32
1:	mtspr	SPRN_L2CR,r3
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	/* disp-flush L2. The interesting thing here is that the L2 can be
	 * up to 2MB ... so using the ROM, we'll end up wrapping back to memory,
	 * but that is probably fine. We disp-flush over 4MB to be safe.
	 */
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	lwz	r0,0(r4)
	addi	r4,r4,32
	bdnz	1b
	sync
	isync
	lis	r4,2
	mtctr	r4
	lis	r4,0xfff0
1:	dcbf	0,r4
	addi	r4,r4,32
	bdnz	1b
	sync
	isync

	/* now disable L2 */
	rlwinm	r5,r5,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r5
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	/* Invalidate L2. This is pre-745x, we clear the L2I bit ourselves */
	oris	r4,r5,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
	sync
	isync

	/* Wait for the invalidation to complete (L2CR[L2IP], the low bit,
	 * stays set while it is in progress)
	 */
1:	mfspr	r3,SPRN_L2CR
	rlwinm.	r0,r3,0,31,31
	bne	1b

	/* Clear L2I */
	xoris	r4,r4,L2CR_L2I@h
	sync
	mtspr	SPRN_L2CR,r4
	sync

	/* now disable the L1 caches */
	mfspr	r0,SPRN_HID0
	rlwinm	r0,r0,0,~(HID0_DCE|HID0_ICE)
	mtspr	SPRN_HID0,r0
	sync
	isync

	/* Restore HID0[DPM] to whatever it was before */
	sync
	mfspr	r0,SPRN_HID0
	rlwimi	r0,r8,0,11,11		/* Restore the HID0[DPM] bit from r8 */
	mtspr	SPRN_HID0,r0
	sync

	/* restore DR and EE */
	sync
	mtmsr	r11
	isync

	mtlr	r10
	blr
_ASM_NOKPROBE_SYMBOL(flush_disable_75x)

/* This code is for 745x processors */
flush_disable_745x:
	/* Turn off EE and DR in MSR */
	mfmsr	r11
	rlwinm	r0,r11,0,~MSR_EE
	rlwinm	r0,r0,0,~MSR_DR
	sync
	mtmsr	r0
	isync

	/* Stop prefetch streams */
	PPC_DSSALL
	sync

	/* Disable L2 prefetching */
	mfspr	r0,SPRN_MSSCR0
	rlwinm	r0,r0,0,0,29
	mtspr	SPRN_MSSCR0,r0
	sync
	isync
	lis	r4,0
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4
	dcbf	0,r4

	/* Due to a bug with the HW flush on some CPU revs, we occasionally
	 * experience data corruption. I'm adding a displacement flush along
	 * with a dcbf loop over a few MB to "help". In theory this doesn't
	 * totally fix the problem, but in practice I couldn't reproduce it
	 * even with a big hammer...
	 */

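	/* Displacement-read the first 4MB of memory */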
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
1:
	lwz	r0,0(r4)
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b
	isync

	/* Now, flush the first 4MB of memory */
	lis	r4,0x0002
	mtctr	r4
	li	r4,0
	sync
1:
	dcbf	0,r4
	addi	r4,r4,32		/* Go to start of next cache line */
	bdnz	1b

	/* Flush and disable the L1 data cache */
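	/* The flush is done one way at a time: the low byte of LDSTCR
	 * locks individual ways, so every way but one is locked, that way
	 * is displacement-flushed by reading 128 lines from ROM, and then
	 * the next way is unlocked in turn.
	 */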
	mfspr	r6,SPRN_LDSTCR
	lis	r3,0xfff0	/* read from ROM for displacement flush */
	li	r4,0xfe		/* start with only way 0 unlocked */
	li	r5,128		/* 128 lines in each way */
1:	mtctr	r5
	rlwimi	r6,r4,0,24,31
	mtspr	SPRN_LDSTCR,r6
	sync
	isync
2:	lwz	r0,0(r3)	/* touch each cache line */
	addi	r3,r3,32
	bdnz	2b
	rlwinm	r4,r4,1,24,30	/* move on to the next way */
	ori	r4,r4,1
	cmpwi	r4,0xff		/* all done? */
	bne	1b
	/* now unlock the L1 data cache */
	li	r4,0
	rlwimi	r6,r4,0,24,31
	sync
	mtspr	SPRN_LDSTCR,r6
	sync
	isync

	/* Flush the L2 cache using the hardware assist */
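	/* L2CR[L2E] is the sign bit, so a non-negative value means the
	 * L2 is already disabled and the whole L2 flush can be skipped.
	 */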
	mfspr	r3,SPRN_L2CR
	cmpwi	r3,0		/* check if it is enabled first */
	bge	4f
	oris	r0,r3,(L2CR_L2IO_745x|L2CR_L2DO_745x)@h
	b	2f
	/* When disabling/locking L2, code must be in L1 */
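	/* (Same jump-through-the-aligned-block trick as in
	 * flush_disable_75x above, to get this code into the I-cache
	 * before L2CR is touched.)
	 */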
	.balign 32
1:	mtspr	SPRN_L2CR,r0	/* lock the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
	ori	r0,r3,L2CR_L2HWF_745x
	sync
	mtspr	SPRN_L2CR,r0	/* set the hardware flush bit */
3:	mfspr	r0,SPRN_L2CR	/* wait for it to go to 0 */
	andi.	r0,r0,L2CR_L2HWF_745x
	bne	3b
	sync
	rlwinm	r3,r3,0,~L2CR_L2E
	b	2f
	/* When disabling L2, code must be in L1 */
	.balign 32
1:	mtspr	SPRN_L2CR,r3	/* disable the L2 cache */
3:	sync
	isync
	b	1f
2:	b	3f
3:	sync
	isync
	b	1b
1:	sync
	isync
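	/* Globally invalidate the now-disabled L2; on 745x, L2CR[L2I]
	 * clears itself once the invalidate has completed.
	 */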
	oris	r4,r3,L2CR_L2I@h
	mtspr	SPRN_L2CR,r4
	sync
	isync
1:	mfspr	r4,SPRN_L2CR
	andis.	r0,r4,L2CR_L2I@h
	bne	1b
	sync

BEGIN_FTR_SECTION
	/* Flush the L3 cache using the hardware assist */
4:	mfspr	r3,SPRN_L3CR
	cmpwi	r3,0		/* check if it is enabled */
	bge	6f
	oris	r0,r3,L3CR_L3IO@h
	ori	r0,r0,L3CR_L3DO
	sync
	mtspr	SPRN_L3CR,r0	/* lock the L3 cache */
	sync
	isync
	ori	r0,r0,L3CR_L3HWF
	sync
	mtspr	SPRN_L3CR,r0	/* set the hardware flush bit */
5:	mfspr	r0,SPRN_L3CR	/* wait for it to go to zero */
	andi.	r0,r0,L3CR_L3HWF
	bne	5b
	rlwinm	r3,r3,0,~L3CR_L3E
	sync
	mtspr	SPRN_L3CR,r3	/* disable the L3 cache */
	sync
	ori	r4,r3,L3CR_L3I
	mtspr	SPRN_L3CR,r4
1:	mfspr	r4,SPRN_L3CR
	andi.	r0,r4,L3CR_L3I
	bne	1b
	sync
END_FTR_SECTION_IFSET(CPU_FTR_L3CR)

6:	mfspr	r0,SPRN_HID0	/* now disable the L1 data cache */
	rlwinm	r0,r0,0,~HID0_DCE
	mtspr	SPRN_HID0,r0
	sync
	isync
	mtmsr	r11		/* restore DR and EE */
	isync
	blr
_ASM_NOKPROBE_SYMBOL(flush_disable_745x)
#endif	/* CONFIG_PPC_BOOK3S_32 */