/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v7m.S
 *
 *  Based on linux/arch/arm/mm/cache-v7.S
 *
 *  Copyright (C) 2001 Deep Blue Solutions Ltd.
 *  Copyright (C) 2005 ARM Ltd.
 *
 *  This is the "shell" of the ARMv7M processor support.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
#include <asm/v7m.h>

#include "proc-macros.S"

/* Generic V7M read/write macros for memory mapped cache operations */
.macro v7m_cache_read, rt, reg
	movw	\rt, #:lower16:BASEADDR_V7M_SCB + \reg
	movt	\rt, #:upper16:BASEADDR_V7M_SCB + \reg
	ldr	\rt, [\rt]
.endm

.macro v7m_cacheop, rt, tmp, op, c = al
	movw\c	\tmp, #:lower16:BASEADDR_V7M_SCB + \op
	movt\c	\tmp, #:upper16:BASEADDR_V7M_SCB + \op
	str\c	\rt, [\tmp]
.endm
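/*
 * Unlike ARMv7-A/R, ARMv7-M has no CP15 coprocessor interface: cache
 * maintenance is requested by writing the MVA (or set/way value) to a
 * memory-mapped register in the System Control Block.  Roughly the C
 * equivalent of the two macros above (illustrative sketch only):
 *
 *	*(volatile u32 *)(BASEADDR_V7M_SCB + op) = rt;		@ v7m_cacheop
 *	rt = *(volatile u32 *)(BASEADDR_V7M_SCB + reg);		@ v7m_cache_read
 *
 * The optional \c argument makes v7m_cacheop emit a conditional
 * movw/movt/str sequence.
 */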

.macro	read_ccsidr, rt
	v7m_cache_read \rt, V7M_SCB_CCSIDR
.endm

.macro read_clidr, rt
	v7m_cache_read \rt, V7M_SCB_CLIDR
.endm

.macro	write_csselr, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_CSSELR
.endm

/*
 * dcisw: Invalidate data cache by set/way
 */
.macro dcisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCISW
.endm

/*
 * dccisw: Clean and invalidate data cache by set/way
 */
.macro dccisw, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCISW
.endm

/*
 * dccimvac: Clean and invalidate data cache line by MVA to PoC.
 */
.irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dccimvac\c, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCIMVAC, \c
.endm
.endr

/*
 * dcimvac: Invalidate data cache line by MVA to PoC
 */
.irp    c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo
.macro dcimvac\c, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c
.endm
.endr
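/*
 * The .irp loops above stamp out one macro per condition code suffix
 * (dccimvac, dccimvaceq, dccimvacne, ..., dcimvaclo), so callers such as
 * v7m_dma_inv_range can use them like predicated instructions; the suffix
 * is forwarded to v7m_cacheop and applied to the whole movw/movt/str
 * sequence.
 */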

/*
 * dccmvau: Clean data cache line by MVA to PoU
 */
.macro dccmvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAU
.endm

/*
 * dccmvac: Clean data cache line by MVA to PoC
 */
.macro dccmvac, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_DCCMVAC
.endm

/*
 * icimvau: Invalidate instruction caches by MVA to PoU
 */
.macro icimvau, rt, tmp
	v7m_cacheop \rt, \tmp, V7M_SCB_ICIMVAU
.endm

/*
 * Invalidate the whole I-cache.
 * The data in rt is ignored by ICIALLU, so rt can be used to build the
 * register address; it is left holding 0 on return.
 */
.macro invalidate_icache, rt
	v7m_cacheop \rt, \rt, V7M_SCB_ICIALLU
	mov \rt, #0
.endm

/*
 * Invalidate the branch predictor.
 * The data in rt is ignored by BPIALL, so rt can be used to build the
 * register address; it is left holding 0 on return.
 */
.macro invalidate_bp, rt
	v7m_cacheop \rt, \rt, V7M_SCB_BPIALL
	mov \rt, #0
.endm

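/*
 * v7m_invalidate_l1()
 *
 * Invalidate the L1 data/unified cache by set/way, using the geometry
 * read from CCSIDR for cache level 0.  As a rough reference (assuming
 * the classic ARMv7 CCSIDR layout used here):
 *
 *	[2:0]	LineSize:	log2(words per line) - 2,
 *				so line bytes = 1 << (LineSize + 4)
 *	[12:3]	Associativity:	number of ways - 1
 *	[27:13]	NumSets:	number of sets - 1
 *
 * Each DCISW write encodes the way index in the top bits (shifted left by
 * clz(NumWays - 1)) and the set index shifted left by log2(line bytes).
 */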
ENTRY(v7m_invalidate_l1)
	mov	r0, #0		@ select cache level 0 (L1 data/unified)

	write_csselr r0, r1
	read_ccsidr r0

	movw	r1, #0x7fff
	and	r2, r1, r0, lsr #13	@ NumSets - 1

	movw	r1, #0x3ff

	and	r3, r1, r0, lsr #3      @ NumWays - 1
	add	r2, r2, #1              @ NumSets

	and	r0, r0, #0x7
	add	r0, r0, #4      @ SetShift

	clz	r1, r3          @ WayShift
	add	r4, r3, #1      @ NumWays
1:	sub	r2, r2, #1      @ NumSets--
	mov	r3, r4          @ Temp = NumWays
2:	subs	r3, r3, #1      @ Temp--
	mov	r5, r3, lsl r1
	mov	r6, r2, lsl r0
	orr	r5, r5, r6      @ Reg = (Temp<<WayShift)|(NumSets<<SetShift)
	dcisw	r5, r6
	bgt	2b
	cmp	r2, #0
	bgt	1b
	dsb	st
	isb
	ret	lr
ENDPROC(v7m_invalidate_l1)

/*
 *	v7m_flush_icache_all()
 *
 *	Flush the whole I-cache.
 *
 *	Registers:
 *	r0 - set to 0
 */
ENTRY(v7m_flush_icache_all)
	invalidate_icache r0
	ret	lr
ENDPROC(v7m_flush_icache_all)

/*
 *	v7m_flush_dcache_all()
 *
 *	Flush the whole D-cache.
 *
 *	Corrupted registers: r0-r7, r9-r11
 */
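/*
 * A sketch of the flow: read CLIDR to find the Level of Coherency, then
 * for each data/unified cache level below it select the level via CSSELR,
 * read its geometry from CCSIDR, and clean+invalidate every set/way with
 * DCCISW.  Levels with no cache or with only an I-cache are skipped.
 */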
ENTRY(v7m_flush_dcache_all)
	dmb					@ ensure ordering with previous memory accesses
	read_clidr r0
	mov	r3, r0, lsr #23			@ move LoC into position
	ands	r3, r3, #7 << 1			@ extract LoC*2 from clidr
	beq	finished			@ if loc is 0, then no need to clean
start_flush_levels:
	mov	r10, #0				@ start clean at cache level 0
flush_levels:
	add	r2, r10, r10, lsr #1		@ work out 3x current cache level
	mov	r1, r0, lsr r2			@ extract cache type bits from clidr
	and	r1, r1, #7			@ mask off the bits for the current cache only
	cmp	r1, #2				@ see what cache we have at this level
	blt	skip				@ skip if no cache, or just i-cache
#ifdef CONFIG_PREEMPTION
	save_and_disable_irqs_notrace r9	@ make cssr&csidr read atomic
#endif
	write_csselr r10, r1			@ set current cache level
	isb					@ isb to sync the new cssr&csidr
	read_ccsidr r1				@ read the new csidr
#ifdef CONFIG_PREEMPTION
	restore_irqs_notrace r9
#endif
	and	r2, r1, #7			@ extract the length of the cache lines
	add	r2, r2, #4			@ add 4 (line length offset)
	movw	r4, #0x3ff
	ands	r4, r4, r1, lsr #3		@ extract the maximum way index (NumWays - 1)
	clz	r5, r4				@ find bit position of way size increment
	movw	r7, #0x7fff
	ands	r7, r7, r1, lsr #13		@ extract the maximum set index (NumSets - 1)
loop1:
	mov	r9, r7				@ create working copy of max index
loop2:
	lsl	r6, r4, r5
	orr	r11, r10, r6			@ factor way and cache number into r11
	lsl	r6, r9, r2
	orr	r11, r11, r6			@ factor index number into r11
	dccisw	r11, r6				@ clean/invalidate by set/way
	subs	r9, r9, #1			@ decrement the index
	bge	loop2
	subs	r4, r4, #1			@ decrement the way
	bge	loop1
skip:
	add	r10, r10, #2			@ increment cache number
	cmp	r3, r10
	bgt	flush_levels
finished:
	mov	r10, #0				@ switch back to cache level 0
	write_csselr r10, r3			@ select current cache level in cssr
	dsb	st
	isb
	ret	lr
ENDPROC(v7m_flush_dcache_all)

/*
 *	v7m_flush_kern_cache_all()
 *
 *	Flush the entire cache system.
 *	The data cache flush is achieved using atomic clean / invalidates
 *	working outwards from the L1 cache.  This is done using set/way
 *	based cache maintenance instructions.
 *	The instruction cache can still be invalidated back to the point of
 *	unification in a single instruction.
 */
ENTRY(v7m_flush_kern_cache_all)
	stmfd	sp!, {r4-r7, r9-r11, lr}
	bl	v7m_flush_dcache_all
	invalidate_icache r0
	ldmfd	sp!, {r4-r7, r9-r11, lr}
	ret	lr
ENDPROC(v7m_flush_kern_cache_all)

/*
 *	v7m_flush_user_cache_all()
 *
 *	Flush all cache entries in a particular address space.
 *
 *	- mm    - mm_struct describing address space
 */
ENTRY(v7m_flush_user_cache_all)
	/*FALLTHROUGH*/

/*
 *	v7m_flush_user_cache_range(start, end, flags)
 *
 *	Flush a range of cache entries in the specified address space.
 *
 *	- start - start address (may not be aligned)
 *	- end   - end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 *
 *	It is assumed that:
 *	- we have a VIPT cache.
 */
ENTRY(v7m_flush_user_cache_range)
	ret	lr
ENDPROC(v7m_flush_user_cache_all)
ENDPROC(v7m_flush_user_cache_range)

/*
 *	v7m_coherent_kern_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
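/*
 * Note: on ARM this entry is normally reached via the cpu_cache_fns glue,
 * e.g. flush_icache_range() after new code has been written to RAM, so it
 * cleans the D-cache to the PoU for the range and then invalidates the
 * I-cache and branch predictor.
 */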
ENTRY(v7m_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	v7m_coherent_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within the specified
 *	region.  This is typically used when code has been written to
 *	a memory region, and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	It is assumed that:
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v7m_coherent_user_range)
 UNWIND(.fnstart		)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
1:
/*
 * We use the open-coded version of dccmvau; otherwise USER() would
 * point at the movw instruction.
 */
	dccmvau	r12, r3
	add	r12, r12, r2
	cmp	r12, r1
	blo	1b
	dsb	ishst
	icache_line_size r2, r3
	sub	r3, r2, #1
	bic	r12, r0, r3
2:
	icimvau r12, r3
	add	r12, r12, r2
	cmp	r12, r1
	blo	2b
	invalidate_bp r0
	dsb	ishst
	isb
	ret	lr
 UNWIND(.fnend		)
ENDPROC(v7m_coherent_kern_range)
ENDPROC(v7m_coherent_user_range)

/*
 *	v7m_flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure that the data held in the page kaddr is written back
 *	to the page in question.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v7m_flush_kern_dcache_area)
	dcache_line_size r2, r3
	add	r1, r0, r1
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccimvac r0, r3		@ clean & invalidate D line / unified line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_flush_kern_dcache_area)

/*
 *	v7m_dma_inv_range(start,end)
 *
 *	Invalidate the data cache within the specified region; we will
 *	be performing a DMA operation in this region and we want to
 *	purge old data in the cache.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
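/*
 * Cache lines that are only partially covered at the start or end of the
 * buffer are cleaned and invalidated (DCCIMVAC) rather than just
 * invalidated, so dirty data belonging to neighbouring objects sharing
 * those lines is not lost; lines fully inside the range are simply
 * invalidated (DCIMVAC).
 */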
v7m_dma_inv_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	tst	r0, r3
	bic	r0, r0, r3
	dccimvacne r0, r3
	addne	r0, r0, r2
	subne	r3, r2, #1	@ restore r3, corrupted by v7m's dccimvac
	tst	r1, r3
	bic	r1, r1, r3
	dccimvacne r1, r3
	cmp	r0, r1
1:
	dcimvaclo r0, r3
	addlo	r0, r0, r2
	cmplo	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_inv_range)

/*
 *	v7m_dma_clean_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
v7m_dma_clean_range:
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccmvac r0, r3			@ clean D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_clean_range)

/*
 *	v7m_dma_flush_range(start,end)
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(v7m_dma_flush_range)
	dcache_line_size r2, r3
	sub	r3, r2, #1
	bic	r0, r0, r3
1:
	dccimvac r0, r3			@ clean & invalidate D / U line
	add	r0, r0, r2
	cmp	r0, r1
	blo	1b
	dsb	st
	ret	lr
ENDPROC(v7m_dma_flush_range)

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
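/*
 * Direction semantics of the branches below: DMA_FROM_DEVICE invalidates
 * the range so stale cache lines do not mask data written by the device;
 * DMA_TO_DEVICE and DMA_BIDIRECTIONAL clean the range so the device
 * observes the CPU's most recent data.
 */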
ENTRY(v7m_dma_map_area)
	add	r1, r1, r0
	teq	r2, #DMA_FROM_DEVICE
	beq	v7m_dma_inv_range
	b	v7m_dma_clean_range
ENDPROC(v7m_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v7m_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v7m_dma_inv_range
	ret	lr
ENDPROC(v7m_dma_unmap_area)

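/*
 * There is no separate "flush to the Level of Unification" implementation
 * for v7-M here, so the louis variant is simply aliased to the full cache
 * flush below.
 */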
	.globl	v7m_flush_kern_cache_louis
	.equ	v7m_flush_kern_cache_louis, v7m_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v7m
455