/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>

/*
 *	__flush_icache_range(start, end)
 *
 *	Ensure that the I and D caches are coherent within the specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(__flush_icache_range)
	/* FALLTHROUGH */

/*
 *	__flush_cache_user_range(start, end)
 *
 *	Ensure that the I and D caches are coherent within the specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3, x4
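/*
 * With ARM64_HAS_CACHE_IDC (CTR_EL0.IDC set), cleaning the D-cache to the
 * PoU is not required for instruction/data coherence, so ordering the
 * caller's earlier stores with a DSB is all the D-side work needed here.
 */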
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	b	7f
alternative_else_nop_endif
	dcache_line_size x2, x3
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
user_alt 9f, "dc cvau, x4",  "dc civac, x4",  ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b
	dsb	ish

7:
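/*
 * With ARM64_HAS_CACHE_DIC (CTR_EL0.DIC set), no I-cache invalidation is
 * required for instruction/data coherence; a context synchronization
 * event (ISB) is sufficient once the D-side maintenance is done.
 */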
alternative_if ARM64_HAS_CACHE_DIC
	isb
	b	8f
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3, 9f
8:	mov	x0, #0
1:
	uaccess_ttbr0_disable x1, x2
	ret
9:
	mov	x0, #-EFAULT
	b	1b
SYM_FUNC_END(__flush_icache_range)
SYM_FUNC_END(__flush_cache_user_range)
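
/*
 * Illustrative usage sketch (not part of this file): after writing new
 * instructions to memory, a caller is expected to flush the affected range
 * before executing it. The helper below is hypothetical and the prototype
 * is assumed, not quoted from the kernel headers.
 *
 *	extern void __flush_icache_range(unsigned long start, unsigned long end);
 *
 *	static void patch_insn(u32 *slot, u32 new_insn)
 *	{
 *		*slot = new_insn;				// store new opcode
 *		__flush_icache_range((unsigned long)slot,	// clean D to PoU,
 *				     (unsigned long)(slot + 1));// then invalidate I
 *	}
 */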

/*
 *	invalidate_icache_range(start, end)
 *
 *	Ensure that the I-cache is invalidated within the specified region.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
SYM_FUNC_START(invalidate_icache_range)
alternative_if ARM64_HAS_CACHE_DIC
	mov	x0, xzr
	isb
	ret
alternative_else_nop_endif

	uaccess_ttbr0_enable x2, x3, x4

	invalidate_icache_by_line x0, x1, x2, x3, 2f
	mov	x0, xzr
1:
	uaccess_ttbr0_disable x1, x2
	ret
2:
	mov	x0, #-EFAULT
	b	1b
SYM_FUNC_END(invalidate_icache_range)
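
/*
 * Illustrative pairing (assumed prototypes, hypothetical helper): this
 * routine only touches the I-side, so code that has just been written must
 * first be cleaned from the D-cache to the PoU, for example:
 *
 *	extern void __clean_dcache_area_pou(void *addr, size_t len);
 *	extern int invalidate_icache_range(unsigned long start, unsigned long end);
 *
 *	static int sync_new_insns(void *buf, size_t len)
 *	{
 *		__clean_dcache_area_pou(buf, len);	// writes visible to I-side
 *		return invalidate_icache_range((unsigned long)buf,
 *					       (unsigned long)buf + len);
 *	}
 */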

/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned and invalidated to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size of region in bytes
 */
SYM_FUNC_START_PI(__flush_dcache_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__flush_dcache_area)
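
/*
 * Illustrative usage sketch (assumed prototype, hypothetical descriptor):
 * cleaning and invalidating to the PoC is what is needed when a buffer is
 * handed to an observer that is not coherent with the CPU caches, such as
 * firmware or a non-coherent device:
 *
 *	extern void __flush_dcache_area(void *addr, size_t len);
 *
 *	struct fw_desc { u64 addr; u64 len; };
 *
 *	static void publish_desc(struct fw_desc *desc, u64 addr, u64 len)
 *	{
 *		desc->addr = addr;
 *		desc->len = len;
 *		__flush_dcache_area(desc, sizeof(*desc));	// push to PoC
 *	}
 */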

/*
 *	__clean_dcache_area_pou(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoU.
 *
 *	- kaddr   - kernel address
 *	- size    - size of region in bytes
 */
SYM_FUNC_START(__clean_dcache_area_pou)
alternative_if ARM64_HAS_CACHE_IDC
	dsb	ishst
	ret
alternative_else_nop_endif
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
SYM_FUNC_END(__clean_dcache_area_pou)
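
/*
 * Note: cleaning to the PoU (rather than to the PoC, as __flush_dcache_area
 * does) is enough to make the data visible to instruction fetches and is
 * typically cheaper; it does not make the data visible to non-coherent DMA
 * masters. With ARM64_HAS_CACHE_IDC the clean is skipped entirely and only
 * a DSB is issued to order the caller's earlier stores.
 */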

/*
 *	__inval_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are invalidated. Any partial lines at the ends of the interval are
 *	also cleaned to the PoC to prevent data loss.
 *
 *	- kaddr   - kernel address
 *	- size    - size of region in bytes
 */
SYM_FUNC_START_LOCAL(__dma_inv_area)
SYM_FUNC_START_PI(__inval_dcache_area)
	/* FALLTHROUGH */

/*
 *	__dma_inv_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size of region in bytes
 */
	add	x1, x1, x0
	dcache_line_size x2, x3
	sub	x3, x2, #1
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3
	b.eq	1f
	dc	civac, x1			// clean & invalidate D / U line
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3
	b.eq	2f
	dc	civac, x0			// clean & invalidate D / U line
	b	3f
2:	dc	ivac, x0			// invalidate D / U line
3:	add	x0, x0, x2
	cmp	x0, x1
	b.lo	2b
	dsb	sy
	ret
SYM_FUNC_END_PI(__inval_dcache_area)
SYM_FUNC_END(__dma_inv_area)
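
/*
 * Worked example of the partial-line handling above (illustrative numbers,
 * assuming 64-byte cache lines): __inval_dcache_area(0x1010, 0xa0) covers
 * [0x1010, 0x10b0), which touches the lines at 0x1000, 0x1040 and 0x1080.
 * The first and last lines also hold bytes outside the interval, so they
 * are cleaned and invalidated (dc civac) to avoid losing that data, while
 * the fully covered line at 0x1040 is simply invalidated (dc ivac).
 */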

/*
 *	__clean_dcache_area_poc(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size of region in bytes
 */
SYM_FUNC_START_LOCAL(__dma_clean_area)
SYM_FUNC_START_PI(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 *	__dma_clean_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size of region in bytes
 */
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__clean_dcache_area_poc)
SYM_FUNC_END(__dma_clean_area)
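
/*
 * Unlike __dma_inv_area above, a clean (dc cvac) writes dirty data back to
 * the PoC but leaves the lines valid in the cache. That is what an outbound
 * (DMA_TO_DEVICE) buffer needs: the device sees the CPU's writes, and the
 * CPU can keep reading its cached copy.
 */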

/*
 *	__clean_dcache_area_pop(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoP.
 *
 *	- kaddr   - kernel address
 *	- size    - size of region in bytes
 */
SYM_FUNC_START_PI(__clean_dcache_area_pop)
	alternative_if_not ARM64_HAS_DCPOP
	b	__clean_dcache_area_poc
	alternative_else_nop_endif
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__clean_dcache_area_pop)
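
/*
 * Illustrative usage sketch (assumed prototype, hypothetical helper):
 * cleaning to the PoP is what a persistent-memory write path needs before
 * data can be considered durable. Without ARMv8.2 DC CVAP (ARM64_HAS_DCPOP)
 * the routine above falls back to a clean to the PoC.
 *
 *	extern void __clean_dcache_area_pop(void *addr, size_t len);
 *
 *	static void pmem_commit(void *pmem_dst, const void *src, size_t len)
 *	{
 *		memcpy(pmem_dst, src, len);
 *		__clean_dcache_area_pop(pmem_dst, len);	// push to the PoP
 *	}
 */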

/*
 *	__dma_flush_area(start, size)
 *
 *	Clean and invalidate the D / U cache lines for the region to the PoC.
 *
 *	- start   - virtual start address of region
 *	- size    - size of region in bytes
 */
SYM_FUNC_START_PI(__dma_flush_area)
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
SYM_FUNC_END_PI(__dma_flush_area)

/*
 *	__dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_FUNC_START_PI(__dma_map_area)
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_flush_area
	b	__dma_clean_area
SYM_FUNC_END_PI(__dma_map_area)

/*
 *	__dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
SYM_FUNC_START_PI(__dma_unmap_area)
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area
	ret
SYM_FUNC_END_PI(__dma_unmap_area)
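
/*
 * Direction handling, summarised: __dma_map_area() runs before the device
 * accesses the buffer (clean, or clean+invalidate for DMA_FROM_DEVICE),
 * and __dma_unmap_area() runs before the CPU reads the buffer back
 * (invalidate, unless the transfer was DMA_TO_DEVICE, where nothing is
 * needed). Illustrative call pattern with assumed prototypes; the real
 * call sites live in the arch DMA support code, not in this file:
 *
 *	extern void __dma_map_area(const void *start, size_t size, int dir);
 *	extern void __dma_unmap_area(const void *start, size_t size, int dir);
 *
 *	__dma_map_area(vaddr, size, dir);	// before starting the transfer
 *	// ... device performs the DMA ...
 *	__dma_unmap_area(vaddr, size, dir);	// before the CPU touches the data
 */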