xref: /OK3568_Linux_fs/kernel/arch/arm/mm/proc-arm940.S (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun/* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun/*
3*4882a593Smuzhiyun *  linux/arch/arm/mm/arm940.S: utility functions for ARM940T
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun *  Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun#include <linux/linkage.h>
8*4882a593Smuzhiyun#include <linux/init.h>
9*4882a593Smuzhiyun#include <linux/pgtable.h>
10*4882a593Smuzhiyun#include <asm/assembler.h>
11*4882a593Smuzhiyun#include <asm/hwcap.h>
12*4882a593Smuzhiyun#include <asm/pgtable-hwdef.h>
13*4882a593Smuzhiyun#include <asm/ptrace.h>
14*4882a593Smuzhiyun#include "proc-macros.S"
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
17*4882a593Smuzhiyun#define CACHE_DLINESIZE	16
18*4882a593Smuzhiyun#define CACHE_DSEGMENTS	4
19*4882a593Smuzhiyun#define CACHE_DENTRIES	64
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun	.text
/*
 * cpu_arm940_proc_init()
 * cpu_arm940_switch_mm()
 *
 * These are not required.  The ARM940T is a nommu (MPU-only) core,
 * so there is no page table to switch and no per-boot processor
 * initialisation beyond __arm940_setup; both entries just return.
 */
ENTRY(cpu_arm940_proc_init)
ENTRY(cpu_arm940_switch_mm)
	ret	lr
31*4882a593Smuzhiyun
/*
 * cpu_arm940_proc_fin()
 *
 * Called when the processor is being taken down: turn off both
 * caches via the CP15 control register.
 */
ENTRY(cpu_arm940_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ read CP15 control register
	bic	r0, r0, #0x00001000		@ clear bit 12: i-cache enable
	bic	r0, r0, #0x00000004		@ clear bit 2: d-cache enable
	mcr	p15, 0, r0, c1, c0, 0		@ write back: caches disabled
	ret	lr
41*4882a593Smuzhiyun
/*
 * cpu_arm940_reset(loc)
 * Params  : r0 = address to jump to
 * Notes   : This sets up everything for a reset
 *
 * Flush both caches, drain the write buffer, then disable the
 * MPU, D-cache and I-cache before jumping to the new image.
 * Lives in .idmap.text so it remains executable across the
 * transition.
 */
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_arm940_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ flush I cache
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x00000005		@ clear D-cache (bit 2) and MPU (bit 0)
	bic	ip, ip, #0x00001000		@ clear I-cache (bit 12)
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_arm940_reset)
	.popsection
60*4882a593Smuzhiyun
/*
 * cpu_arm940_do_idle()
 *
 * Put the core into its low-power state until the next interrupt,
 * using the CP15 c7 "wait for interrupt" operation.
 */
	.align	5
ENTRY(cpu_arm940_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr
68*4882a593Smuzhiyun
/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 *	(The I-cache holds no dirty data, so a single whole-cache
 *	invalidate operation suffices.)
 */
ENTRY(arm940_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(arm940_flush_icache_all)
79*4882a593Smuzhiyun
/*
 *	flush_user_cache_all()
 *
 *	The ARM940T cannot flush by virtual address, so this falls
 *	through to the whole-cache flush below.
 */
ENTRY(arm940_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(arm940_flush_kern_cache_all)
	mov	r2, #VM_EXEC			@ force the I-cache flush below
	/* FALLTHROUGH */

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	There is no efficient way to flush a range of cache entries
 *	in the specified address range. Thus, flushes all.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
ENTRY(arm940_flush_user_cache_range)
	mov	ip, #0
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, ip, c7, c6, 0		@ flush D cache
#else
	@ Index/segment format for the c7,c14,2 operation:
	@ bits [31:26] = cache entry index, bits [5:4] = segment.
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
122*4882a593Smuzhiyun
/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_coherent_user_range)
	/* FALLTHROUGH */

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.  Address arguments are ignored: the ARM940T
 *	cannot operate by address, so the whole D-cache is
 *	cleaned/invalidated by set/way.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(arm940_flush_kern_dcache_area)
	mov	r0, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D index
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
170*4882a593Smuzhiyun
/*
 *	dma_inv_range(start, end)
 *
 *	There is no efficient way to invalidate a specifid virtual
 *	address range. Thus, invalidates all.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
arm940_dma_inv_range:
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c6, 2		@ flush D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
191*4882a593Smuzhiyun
/*
 *	dma_clean_range(start, end)
 *
 *	There is no efficient way to clean a specifid virtual
 *	address range. Thus, cleans all.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 *	cpu_arm940_dcache_clean_area() shares this entry point: both
 *	degrade to a whole-cache clean.  With a write-through D-cache
 *	there is never dirty data, so only the write buffer is drained.
 */
arm940_dma_clean_range:
ENTRY(cpu_arm940_dcache_clean_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:	mcr	p15, 0, r3, c7, c10, 2		@ clean D entry
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
215*4882a593Smuzhiyun
/*
 *	dma_flush_range(start, end)
 *
 *	There is no efficient way to clean and invalidate a specifid
 *	virtual address range.  Thus, the whole D-cache is processed:
 *	clean+invalidate normally, invalidate-only for a write-through
 *	cache (which can hold no dirty data).
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(arm940_dma_flush_range)
	mov	ip, #0
	mov	r1, #(CACHE_DSEGMENTS - 1) << 4	@ 4 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r3, c7, c14, 2		@ clean/flush D entry
#else
	mcr	p15, 0, r3, c7, c6, 2		@ invalidate D entry
#endif
	subs	r3, r3, #1 << 26
	bcs	2b				@ entries 63 to 0
	subs	r1, r1, #1 << 4
	bcs	1b				@ segments 3 to 0
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
241*4882a593Smuzhiyun
/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(arm940_dma_map_area)
	add	r1, r1, r0			@ r1 = end address
	cmp	r2, #DMA_TO_DEVICE
	beq	arm940_dma_clean_range		@ to device: clean only
	bcs	arm940_dma_inv_range		@ dir > TO_DEVICE (FROM_DEVICE): invalidate
	b	arm940_dma_flush_range		@ otherwise (BIDIRECTIONAL): clean+invalidate
ENDPROC(arm940_dma_map_area)
255*4882a593Smuzhiyun
/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 *
 *	Nothing to do here: all maintenance is done at map time.
 */
ENTRY(arm940_dma_unmap_area)
	ret	lr
ENDPROC(arm940_dma_unmap_area)
265*4882a593Smuzhiyun
	@ The "flush to Level of Unification Inner Shareable" op is simply
	@ aliased to the full cache flush on this single-level-cache core.
	.globl	arm940_flush_kern_cache_louis
	.equ	arm940_flush_kern_cache_louis, arm940_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions arm940
271*4882a593Smuzhiyun
/*
 * __arm940_setup
 *
 * Boot-time initialisation, invoked via the proc_info initfn:
 * invalidate both caches, program the MPU protection regions
 * (region 0 = 4GB background, region 1 = RAM, region 2 = FLASH),
 * and hand back (in r0) the CP15 control-register value the caller
 * should write to enable the MPU and caches.
 *
 * Returns : r0 = control register value (I-cache, D-cache, MPU bits set)
 * Clobbers: r3, r7
 */
	.type	__arm940_setup, #function
__arm940_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c6, 0		@ invalidate D cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c6, c3, 0		@ disable data area 3~7
	mcr	p15, 0, r0, c6, c4, 0
	mcr	p15, 0, r0, c6, c5, 0
	mcr	p15, 0, r0, c6, c6, 0
	mcr	p15, 0, r0, c6, c7, 0

	mcr	p15, 0, r0, c6, c3, 1		@ disable instruction area 3~7
	mcr	p15, 0, r0, c6, c4, 1
	mcr	p15, 0, r0, c6, c5, 1
	mcr	p15, 0, r0, c6, c6, 1
	mcr	p15, 0, r0, c6, c7, 1

	mov	r0, #0x0000003F			@ base = 0, size = 4GB
	mcr	p15, 0, r0, c6,	c0, 0		@ set area 0, default
	mcr	p15, 0, r0, c6,	c0, 1

	ldr	r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
	ldr	r7, =CONFIG_DRAM_SIZE >> 12	@ size of RAM (must be >= 4KB)
	pr_val	r3, r0, r7, #1
	mcr	p15, 0, r3, c6,	c1, 0		@ set area 1, RAM
	mcr	p15, 0, r3, c6,	c1, 1

	ldr	r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
	ldr	r7, =CONFIG_FLASH_SIZE		@ size of FLASH (must be >= 4KB)
	pr_val	r3, r0, r7, #1			@ fixed: size is in r7; r6 was never loaded
	@ NOTE(review): the RAM region passes CONFIG_DRAM_SIZE >> 12 while
	@ FLASH passes CONFIG_FLASH_SIZE unshifted - confirm against the
	@ pr_val macro in proc-macros.S which unit it expects; they should agree.
	mcr	p15, 0, r3, c6,	c2, 0		@ set area 2, ROM/FLASH
	mcr	p15, 0, r3, c6,	c2, 1

	mov	r0, #0x06
	mcr	p15, 0, r0, c2, c0, 0		@ Region 1&2 cacheable
	mcr	p15, 0, r0, c2, c0, 1
#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
	mov	r0, #0x00			@ disable whole write buffer
#else
	mov	r0, #0x02			@ Region 1 write bufferred
#endif
	mcr	p15, 0, r0, c3, c0, 0

	mov	r0, #0x10000
	sub	r0, r0, #1			@ r0 = 0xffff
	mcr	p15, 0, r0, c5, c0, 0		@ all read/write access
	mcr	p15, 0, r0, c5, c0, 1

	mrc	p15, 0, r0, c1, c0		@ get control register
	orr	r0, r0, #0x00001000		@ I-cache
	orr	r0, r0, #0x00000005		@ MPU/D-cache

	ret	lr

	.size	__arm940_setup, . - __arm940_setup
329*4882a593Smuzhiyun
	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm940, dabort=nommu_early_abort, pabort=legacy_pabort, nommu=1

	.section ".rodata"

	string	cpu_arch_name, "armv4t"
	string	cpu_elf_name, "v4"
	string	cpu_arm940_name, "ARM940T"

	.align

	.section ".proc.info.init", "a"

	@ Processor match record consumed by the boot-time CPU probe
	@ (layout: struct proc_info_list).
	.type	__arm940_proc_info,#object
__arm940_proc_info:
	.long	0x41009400			@ CPU id value to match
	.long	0xff00fff0			@ CPU id mask
	.long	0				@ mm mmu flags (none: nommu)
	initfn	__arm940_setup, __arm940_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
	.long	cpu_arm940_name
	.long	arm940_processor_functions
	.long	0				@ tlb functions (none: nommu)
	.long	0				@ user/copypage functions (none)
	.long	arm940_cache_fns
	.size	__arm940_proc_info, . - __arm940_proc_info
360*4882a593Smuzhiyun
361