/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/proc-arm946.S: utility functions for ARM946E-S
 *
 *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
 *
 *  (Much of the cache code is taken from proc-arm926.S)
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * ARM946E-S can be synthesized with a D-cache of 0KB up to 1MB.
 * A typical 8KB cache comprises 4 segments of 64 lines of 32 bytes
 * (8 words) each.
 */
#define CACHE_DSIZE	(CONFIG_CPU_DCACHE_SIZE) /* typically 8KB. */
#define CACHE_DLINESIZE	32			/* fixed */
#define CACHE_DSEGMENTS	4			/* fixed */
#define CACHE_DENTRIES	(CACHE_DSIZE / CACHE_DSEGMENTS / CACHE_DLINESIZE)
#define CACHE_DLIMIT	(CACHE_DSIZE * 4)	/* benchmark needed */

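/*
 * Note on the index-based cache operations used below: the value
 * written by "mcr p15, 0, rX, c7, c14, 2" (clean+invalidate D-cache
 * by index) is assumed to encode the segment number in the top bits
 * (stepped by 1 << 29 in __flush_whole_cache) and the line index
 * starting at bit 4 (stepped by 1 << 4), which is the layout the
 * CACHE_DSEGMENTS/CACHE_DENTRIES loop constants above rely on.
 */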
	.text
/*
 * cpu_arm946_proc_init()
 * cpu_arm946_switch_mm()
 *
 * These are not required.
 */
ENTRY(cpu_arm946_proc_init)
ENTRY(cpu_arm946_switch_mm)
	ret	lr

/*
 * cpu_arm946_proc_fin()
 */
ENTRY(cpu_arm946_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x00001000		@ i-cache
	bic	r0, r0, #0x00000004		@ d-cache
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_arm946_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 */
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm946_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ .............c.p
	bic	ip, ip, #0x00001000		@ i-cache
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm946_reset)
	.popsection
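/*
 * Note: cpu_arm946_reset is placed in .idmap.text for consistency with
 * the common cpu_reset interface used by MMU-based processors; on this
 * MPU-only (nommu) core there is no page-table switch involved, the
 * routine simply branches to r0 with the caches and write buffer
 * disabled.
 */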

/*
 * cpu_arm946_do_idle()
 */
	.align	5
ENTRY(cpu_arm946_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(arm946_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm946_flush_icache_all)

/*
 *	flush_user_cache_all()
 */
ENTRY(arm946_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm946_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	mov	r1, #(CACHE_DSEGMENTS - 1) << 29 @ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 4 @ n entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 4
	bcs	2b				@ entries n to 0
	subs	r1, r1, #1 << 29
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
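/*
 * __flush_whole_cache expects r2 to hold the vm_flags of the region
 * being flushed (only VM_EXEC is tested, to decide whether the I-cache
 * and write buffer also need flushing) and ip to be zero; both
 * arm946_flush_kern_cache_all above and arm946_flush_user_cache_range
 * below set these up before branching here.
 */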

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 * (same as arm926)
 */
ENTRY(arm946_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

1:	tst	r2, #VM_EXEC
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#else
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
#endif
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
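/*
 * The loop above is unrolled to process two cache lines per iteration;
 * ranges at or above CACHE_DLIMIT are punted to __flush_whole_cache,
 * where a full clean/invalidate is assumed to be cheaper than walking
 * the range line by line.
 */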

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm946_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 * (same as arm926)
 */
ENTRY(arm946_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr
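/*
 * Note: coherent_user_range reports success by returning 0 in r0; on
 * newer cores this cache method can return an error for a faulting
 * user range, but this implementation cannot fault, so it is believed
 * safe to return 0 unconditionally.
 */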

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 * (same as arm926)
 */
ENTRY(arm946_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 * (same as arm926)
 */
arm946_dma_inv_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
#endif
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as arm926)
 */
arm946_dma_clean_range:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as arm926)
 */
ENTRY(arm946_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
#else
	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm946_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	arm946_dma_clean_range
	bcs	arm946_dma_inv_range
	b	arm946_dma_flush_range
ENDPROC(arm946_dma_map_area)
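/*
 * dma_map_area dispatches on the DMA direction in r2: with the
 * kernel's DMA_BIDIRECTIONAL=0, DMA_TO_DEVICE=1, DMA_FROM_DEVICE=2
 * numbering, "beq" sends DMA_TO_DEVICE to a clean, "bcs"
 * (unsigned >=) sends DMA_FROM_DEVICE to an invalidate, and the
 * remaining case, DMA_BIDIRECTIONAL, falls through to a full
 * clean+invalidate.
 */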

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm946_dma_unmap_area)
	ret	lr
ENDPROC(arm946_dma_unmap_area)

	.globl	arm946_flush_kern_cache_louis
	.equ	arm946_flush_kern_cache_louis, arm946_flush_kern_cache_all
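/*
 * flush_kern_cache_louis flushes to the Level of Unification Inner
 * Shareable; on a uniprocessor core with a single cache level such as
 * the ARM946E-S this is assumed to be equivalent to flushing the whole
 * cache, hence the alias to arm946_flush_kern_cache_all.
 */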

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm946

ENTRY(cpu_arm946_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

	.type	__arm946_setup, #function
__arm946_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable memory regions 3~7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6, c0, 0		@ set region 0, default

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
	ldr	r7, =CONFIG_DRAM_SIZE		@ size of RAM (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c1, 0

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r7, =CONFIG_FLASH_SIZE		@ size of FLASH (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6, c2, 0
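/*
 * The pr_val macro (from proc-macros.S) is assumed to pack the region
 * base, an encoded power-of-two size derived from the byte count in
 * r7, and the enable flag into the ARM946 protection-region register
 * layout (base in bits [31:12], size code in bits [5:1], enable in
 * bit 0), matching the hand-built 0x3F value used for region 0 above.
 */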

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ region 1,2 d-cacheable
	mcr	p15, 0, r0, c2, c0, 1		@ region 1,2 i-cacheable
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ region 1 write buffered
#endif
	mcr	p15, 0, r0, c3, c0, 0

/*
 *  Access Permission Settings for future permission control by PU.
 *
 *				priv.	user
 *	region 0 (whole)	rw	--	: b0001
 *	region 1 (RAM)		rw	rw	: b0011
 *	region 2 (FLASH)	rw	r-	: b0010
 *	region 3~7 (none)	--	--	: b0000
 */
	mov	r0, #0x00000031
	orr	r0, r0, #0x00000200
	mcr	p15, 0, r0, c5, c0, 2		@ set data access permission
	mcr	p15, 0, r0, c5, c0, 3		@ set inst. access permission
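/*
 * The mov/orr pair above builds 0x231: each 4-bit nibble n of the
 * access permission register holds the code for region n, so nibble 0
 * gets b0001 (region 0), nibble 1 gets b0011 (region 1) and nibble 2
 * gets b0010 (region 2), exactly the table above.
 */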

	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x00004000		@ .1.. .... .... ....
#endif
	ret	lr

	.size	__arm946_setup, . - __arm946_setup
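/*
 * __arm946_setup does not write the control register itself: it
 * returns the desired value in r0, and the generic nommu startup code
 * is expected to install that value into CP15 c1 after calling this
 * initfn.
 */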

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm946, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5t"
	string	cpu_arm946_name, "ARM946E-S"

	.align

	.section ".proc.info.init", "a"
	.type	__arm946_proc_info,#object
__arm946_proc_info:
	.long	0x41009460
	.long	0xff00fff0
	.long	0
	.long	0
	initfn	__arm946_setup, __arm946_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm946_name
	.long	arm946_processor_functions
	.long	0
	.long	0
	.long	arm946_cache_fns
	.size	__arm946_proc_info, . - __arm946_proc_info
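/*
 * The value/mask pair 0x41009460/0xff00fff0 at the top of the
 * proc_info entry matches the ARM946 main ID register (implementer
 * 0x41 = ARM Ltd, part number 0x946) while ignoring the variant and
 * revision fields; this is how the boot code selects this processor
 * description.
 */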