/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4wt.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *
 *  ARMv4 write-through cache operations support.
 *
 *  We assume that the write buffer is not enabled.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	8

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 *
 * *** This needs benchmarking
 */
#define CACHE_DLIMIT	16384
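
/*
 * With 8 segments of 64 lines of 32 bytes each, the whole data cache
 * holds 8 * 64 * 32 = 16384 bytes, so CACHE_DLIMIT as set above is
 * exactly the size of the D cache.
 */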

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4wt_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(v4wt_flush_icache_all)
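
/*
 * Illustrative sketch (assumed call path; see <asm/cacheflush.h>):
 * with MULTI_CACHE configured, generic code reaches this routine
 * through the cpu_cache function table built at the end of this file:
 *
 *	__cpuc_flush_icache_all();	-> cpu_cache.flush_icache_all()
 */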

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 */
ENTRY(v4wt_flush_user_cache_all)
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4wt_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c6, 0		@ invalidate D cache
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (inclusive, page aligned)
 *	- end	- end address (exclusive, page aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4wt_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

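	@ Below the limit: walk the range one cache line at a time,
	@ touching the I cache only for executable mappings.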
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4wt_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
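	@ coherent_user_range is declared as returning int: report success.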
	mov	r0, #0
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache.
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4wt_flush_kern_dcache_area)
	mov	r2, #0
	mcr	p15, 0, r2, c7, c5, 0		@ invalidate I cache
	add	r1, r0, r1
	/* FALLTHROUGH */

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
v4wt_dma_inv_range:
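	@ Write-through: lines are never dirty, so discarding partial
	@ lines at an unaligned start/end cannot lose data and the
	@ write-back requirement above is trivially met.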
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
	.globl	v4wt_dma_flush_range
	.equ	v4wt_dma_flush_range, v4wt_dma_inv_range
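	@ On a write-through cache memory is always up to date, so "clean
	@ and invalidate" degenerates to a plain invalidate; hence the
	@ alias to v4wt_dma_inv_range above.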

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_unmap_area)
	add	r1, r1, r0
	teq	r2, #DMA_TO_DEVICE
	bne	v4wt_dma_inv_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4wt_dma_map_area)
	ret	lr
ENDPROC(v4wt_dma_unmap_area)
ENDPROC(v4wt_dma_map_area)
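
/*
 * dma_map_area is a no-op here: with a write-through cache (and the
 * write buffer assumed off, see the header) memory already holds
 * everything the CPU wrote, so there is nothing to clean before a
 * device reads it; FROM_DEVICE data is invalidated at unmap time.
 *
 * Illustrative sketch (assumed call flow via the arch DMA hooks, not
 * part of this file):
 *
 *	dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		-> v4wt_dma_map_area (nothing to do)
 *	... device writes into buf ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *		-> v4wt_dma_unmap_area -> v4wt_dma_inv_range
 */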

	.globl	v4wt_flush_kern_cache_louis
	.equ	v4wt_flush_kern_cache_louis, v4wt_flush_kern_cache_all

	__INITDATA

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions v4wt
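
@ For reference, a sketch of what define_cache_functions v4wt expands
@ to (see proc-macros.S; the macro is authoritative, this is only
@ illustrative):
@
@	ENTRY(v4wt_cache_fns)
@		.long	v4wt_flush_icache_all
@		.long	v4wt_flush_kern_cache_all
@		.long	v4wt_flush_kern_cache_louis
@		.long	v4wt_flush_user_cache_all
@		.long	v4wt_flush_user_cache_range
@		.long	v4wt_coherent_kern_range
@		.long	v4wt_coherent_user_range
@		.long	v4wt_flush_kern_dcache_area
@		.long	v4wt_dma_map_area
@		.long	v4wt_dma_unmap_area
@		.long	v4wt_dma_flush_range
@	END(v4wt_cache_fns)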