/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/cache-v4.S
 *
 *  Copyright (C) 1997-2002 Russell King
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/page.h>
#include "proc-macros.S"

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(v4_flush_icache_all)
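	@ nothing to do here: these cores only provide the combined
	@ I+D ("ID") cache flush used below, there is no separate
	@ I-cache operation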
	ret	lr
ENDPROC(v4_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Invalidate all cache entries in a particular address
 *	space.
 *
 *	- mm	- mm_struct describing address space
 */
ENTRY(v4_flush_user_cache_all)
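	@ the mm argument is ignored; there is no per-address-space
	@ maintenance available, so simply flush the whole cache below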
	/* FALLTHROUGH */
/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(v4_flush_kern_cache_all)
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
	ret	lr
#else
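	@ without CONFIG_CPU_CP15 (e.g. a coprocessor-less ARM7TDMI)
	@ there is no cache control register to write, so fall through
	@ to the plain return in v4_coherent_user_range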
	/* FALLTHROUGH */
#endif

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Invalidate a range of cache entries in the specified
 *	address space.
 *
 *	- start - start address (may not be aligned)
 *	- end	- end address (exclusive, may not be aligned)
 *	- flags	- vm_area_struct flags describing address space
 */
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
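	@ r0-r2 carry start/end/flags, so use ip as the scratch zero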
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ flush ID cache
	ret	lr
#else
	/* FALLTHROUGH */
#endif

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start and end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start and end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_coherent_user_range)
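	@ no maintenance is needed here; return 0 to report success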
	mov	r0, #0
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
ENTRY(v4_flush_kern_dcache_area)
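	@ same operation as dma_flush_range below: the addr/size
	@ arguments are not used, the whole ID cache is flushed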
	/* FALLTHROUGH */

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start  - virtual start address
 *	- end	 - virtual end address
 */
ENTRY(v4_dma_flush_range)
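	@ the start/end arguments are ignored: the only operation
	@ available is the whole-cache ID flush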
#ifdef CONFIG_CPU_CP15
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7, 0		@ flush ID cache
#endif
	ret	lr

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4_dma_unmap_area)
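	@ DMA_FROM_DEVICE and DMA_BIDIRECTIONAL buffers are flushed so
	@ the CPU does not read stale cached data after the transfer;
	@ DMA_TO_DEVICE needs nothing on unmap and falls through below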
	teq	r2, #DMA_TO_DEVICE
	bne	v4_dma_flush_range
	/* FALLTHROUGH */

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(v4_dma_map_area)
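	@ nothing to do on map; any flushing required is done at unmap
	@ time by v4_dma_unmap_area above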
	ret	lr
ENDPROC(v4_dma_unmap_area)
ENDPROC(v4_dma_map_area)

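	@ flushing to the Level of Unification Inner Shareable (LoUIS)
	@ cannot be done more precisely than a full flush here, so alias
	@ it to v4_flush_kern_cache_all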
	.globl	v4_flush_kern_cache_louis
	.equ	v4_flush_kern_cache_louis, v4_flush_kern_cache_all

	__INITDATA
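	@ the table below is placed in init data and discarded along
	@ with the other init sections after boot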

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
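	@ the macro emits the v4_cache_fns function-pointer table, with
	@ each slot filled by the corresponding v4_* routine above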
	define_cache_functions v4