/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/arch/arm/mm/arm925.S: MMU functions for ARM925
 *
 *  Copyright (C) 1999,2000 ARM Limited
 *  Copyright (C) 2000 Deep Blue Solutions Ltd.
 *  Copyright (C) 2002 RidgeRun, Inc.
 *  Copyright (C) 2002-2003 MontaVista Software, Inc.
 *
 *  Update for Linux-2.6 and cache flush improvements
 *  Copyright (C) 2004 Nokia Corporation by Tony Lindgren <tony@atomide.com>
 *
 *  hacked for non-paged-MM by Hyok S. Choi, 2004.
 *
 * This is the low-level assembler code implementing the cache and TLB
 * maintenance functions for the ARM925.
 *
 *  CONFIG_CPU_ARM925_CPU_IDLE -> nohlt
 *
 * Some additional notes based on deciphering the TI TRM on OMAP-5910:
 *
 * NOTE1: The TI925T Configuration Register bit "D-cache clean and flush
 *	  entry mode" must be 0 to flush the entries in both segments
 *	  at once. This is the default value. See TRM 2-20 and 2-24 for
 *	  more information.
 *
 * NOTE2: Default is the "D-cache clean and flush entry mode". It looks
 *	  like the "Transparent mode" must be on for partial cache flushes
 *	  to work in this mode. This mode only works with 16-bit external
 *	  memory. See TRM 2-24 for more information.
 *
 * NOTE3: Write-back cache flushing seems to be flakey with devices using
 *        direct memory access, such as USB OHCI. The workaround is to use
 *        write-through cache with CONFIG_CPU_DCACHE_WRITETHROUGH (this is
 *        the default for OMAP-1510).
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	16

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	2

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	256

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	8192

	.text
/*
 * cpu_arm925_proc_init()
 */
ENTRY(cpu_arm925_proc_init)
	ret	lr

/*
 * cpu_arm925_proc_fin()
 */
ENTRY(cpu_arm925_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_arm925_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm925_reset)
	/* Send software reset to MPU and DSP */
	mov	ip, #0xff000000
	orr	ip, ip, #0x00fe0000
	orr	ip, ip, #0x0000ce00
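	@ ip = 0xfffece00: assumed to be the OMAP-1510/5910 clock/reset
	@ control base, with the ARM_RSTCT1 register at offset 0x10.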
	mov	r4, #1
	strh	r4, [ip, #0x10]
ENDPROC(cpu_arm925_reset)
	.popsection

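	/*
	 * Generic CP15 reset sequence: invalidate the caches and TLBs,
	 * disable the MMU and caches, then branch to the reset location
	 * passed in r0.
	 */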
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0

/*
 * cpu_arm925_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	10
ENTRY(cpu_arm925_do_idle)
	mov	r0, #0
	mrc	p15, 0, r1, c1, c0, 0		@ Read control register
	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
	bic	r2, r1, #1 << 12
	mcr	p15, 0, r2, c1, c0, 0		@ Disable I cache
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	mcr	p15, 0, r1, c1, c0, 0		@ Restore ICache enable
	ret	lr

/*
 *	flush_icache_all()
 *
 *	Unconditionally invalidate the entire icache.
 */
ENTRY(arm925_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm925_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
ENTRY(arm925_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm925_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
	/* Flush entries in both segments at once, see NOTE1 above */
	mov	r3, #(CACHE_DENTRIES - 1) << 4	@ 256 entries in segment
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean+invalidate D index
	subs	r3, r3, #1 << 4
	bcs	2b				@ entries 255 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
ENTRY(arm925_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#else
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#endif
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm925_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm925_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(arm925_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm925_dma_inv_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
#endif
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
arm925_dma_clean_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm925_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
#else
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm925_dma_map_area)
	add	r1, r1, r0
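	@ r1 now holds the end address.  Dispatch on the DMA direction:
	@   DMA_TO_DEVICE      -> clean only
	@   DMA_FROM_DEVICE    -> invalidate only
	@   DMA_BIDIRECTIONAL  -> clean and invalidate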
	cmp	r2, #DMA_TO_DEVICE
	beq	arm925_dma_clean_range
	bcs	arm925_dma_inv_range
	b	arm925_dma_flush_range
ENDPROC(arm925_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm925_dma_unmap_area)
	ret	lr
ENDPROC(arm925_dma_unmap_area)

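	@ On this uniprocessor core, a flush to the Level of Unification
	@ Inner Shareable is equivalent to a full cache flush.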
	.globl	arm925_flush_kern_cache_louis
	.equ	arm925_flush_kern_cache_louis, arm925_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm925

ENTRY(cpu_arm925_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_arm925_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_arm925_switch_mm)
#ifdef CONFIG_MMU
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
#else
	/* Flush entries in both segments at once, see NOTE1 above */
	mov	r3, #(CACHE_DENTRIES - 1) << 4	@ 256 entries in segment
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean & invalidate D index
	subs	r3, r3, #1 << 4
	bcs	2b				@ entries 255 to 0
#endif
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	ret	lr

/*
 * cpu_arm925_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_arm925_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif /* CONFIG_MMU */
	ret	lr

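/*
 * __arm925_setup
 *
 * Per-CPU initialisation, called from head.S with the MMU off.  It is
 * assumed (as for the other proc-*.S setup routines) that the caller
 * writes the CP15 control register value returned in r0 when it enables
 * the MMU.
 */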
	.type	__arm925_setup, #function
__arm925_setup:
	mov	r0, #0

	/* Transparent on, D-cache clean & flush mode. See NOTE2 above */
	orr	r0, r0, #1 << 1			@ transparent mode on
	mcr	p15, 0, r0, c15, c1, 0		@ write TI config register

	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #4				@ disable write-back on caches explicitly
	mcr	p15, 7, r0, c15, c0, 0
#endif

	adr	r5, arm925_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .1.. .... .... ....
#endif
	ret	lr
	.size	__arm925_setup, . - __arm925_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 0001 ..11 1101
	 *
	 */
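	/*
	 * crval (see proc-macros.S): the first word is the mask of control
	 * register bits to clear, the second the value to set (mmuset when
	 * CONFIG_MMU is enabled, ucset otherwise).
	 */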
	.type	arm925_crval, #object
arm925_crval:
	crval	clear=0x00007f3f, mmuset=0x0000313d, ucset=0x00001130

	__INITDATA
	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm925, dabort=v4t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm925_name, "ARM925T"

	.align

	.section ".proc.info.init", "a"

.macro arm925_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
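	/*
	 * MMU section flags used by head.S for the initial kernel mapping
	 * (__cpu_mm_mmu_flags in struct proc_info_list).
	 */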
	.long   PMD_TYPE_SECT | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
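	/*
	 * MMU section flags used by head.S for the initial I/O mapping
	 * (__cpu_io_mmu_flags in struct proc_info_list).
	 */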
	.long   PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__arm925_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm925_name
	.long	arm925_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm925_cache_fns
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

	arm925_proc_info arm925, 0x54029250, 0xfffffff0, cpu_arm925_name
	arm925_proc_info arm915, 0x54029150, 0xfffffff0, cpu_arm925_name