/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-fa.S
 *
 *  Copyright (C) 2005 Faraday Corp.
 *  Copyright (C) 2008-2009 Paulius Zaleckas <paulius.zaleckas@teltonika.lt>
 *
 * Based on cache-v4wb.S:
 *  Copyright (C) 1997-2002 Russell King
 *
 *  Processors: FA520 FA526 FA626
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/page.h>

#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	16

/*
 * The total size of the data cache.
 */
#ifdef CONFIG_ARCH_GEMINI
#define CACHE_DSIZE	8192
#else
#define CACHE_DSIZE	16384
#endif

/* FIXME: put optimal value here. Current one is just estimation */
#define CACHE_DLIMIT	(CACHE_DSIZE * 2)
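/*
 * Ranges of at least CACHE_DLIMIT bytes are handled by cleaning and
 * invalidating the whole D cache rather than one line at a time (see
 * fa_flush_user_cache_range below).
 */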

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(fa_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(fa_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(fa_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(fa_flush_kern_cache_all)
	mov	ip, #0
	mov	r2, #VM_EXEC			@ force the I cache/BTB path below
__flush_whole_cache:
	mcr	p15, 0, ip, c7, c14, 0		@ clean/invalidate D cache
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(fa_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT		@ total size >= limit?
	bhs	__flush_whole_cache		@ flush whole D cache

1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I line
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ drain write buffer
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(fa_coherent_kern_range)
	/* fall through */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(fa_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the region described by addr,
 *	size is written back to memory.
 *
 *	- addr	- kernel address
 *	- size	- size of region
 */
ENTRY(fa_flush_kern_dcache_area)
	add	r1, r0, r1			@ end = addr + size
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D line
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
fa_dma_inv_range:
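	@ If start/end fall mid-line, clean and invalidate those boundary
	@ lines first so that dirty data outside the range is not lost.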
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c14, 1		@ clean & invalidate D entry
	tst	r1, #CACHE_DLINESIZE - 1
	bic	r1, r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c14, 1		@ clean & invalidate D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean (write back) the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
fa_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_flush_range(start, end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(fa_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean & invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(fa_dma_map_area)
	add	r1, r1, r0			@ end = start + size
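	@ Dispatch on direction: DMA_TO_DEVICE (1) only needs a clean,
	@ anything higher (DMA_FROM_DEVICE) an invalidate, and the
	@ fall-through case (DMA_BIDIRECTIONAL, 0) a full clean+invalidate.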
	cmp	r2, #DMA_TO_DEVICE
	beq	fa_dma_clean_range
	bcs	fa_dma_inv_range
	b	fa_dma_flush_range
ENDPROC(fa_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(fa_dma_unmap_area)
	ret	lr
ENDPROC(fa_dma_unmap_area)

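/*
 * These cores have a single cache level, so a flush to the Level of
 * Unification (louis) is equivalent to a full cache flush; alias the
 * two entry points.
 */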
	.globl	fa_flush_kern_cache_louis
	.equ	fa_flush_kern_cache_louis, fa_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions fa