/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of the data cache.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x2~x9: clobbered
 */
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	clz	w5, w3			/* bit position of #ways */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection
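
/*
 * Worked example of the set/way word built above, for a hypothetical 32 KiB,
 * 4-way L1 D-cache with 64-byte lines (128 sets): ccsidr_el1 reports
 * LineSize = 2, so x2 = 6; ways-1 = 3, so x5 = clz(3) = 30; sets-1 = 127.
 * Cleaning way 3, set 127 at level 1 (level index 0) therefore uses
 * (3 << 30) | (127 << 6) | (0 << 1) = 0xC0001FC0 as the "dc cisw" operand,
 * matching the set/way argument format described in the ARMv8 ARM.
 */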

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection
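
/*
 * Example clidr_el1 decode, with hypothetical values: Ctype1 = 0b011
 * (separate I- and D-caches), Ctype2 = 0b100 (unified L2), LoC = 2.  The
 * loop above then visits level indices 0 and 1; both cache types are >= 2
 * (data or unified), so both levels are handed to __asm_dcache_level, and
 * the walk stops once x0 reaches loc.
 */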

.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection
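
/*
 * From C these entry points are normally reached through flush_dcache_all()
 * and invalidate_dcache_all(); in this tree those wrappers are assumed to
 * live in arch/arm/cpu/armv8/cache_v8.c.  A minimal caller sketch:
 *
 *	flush_dcache_all();		// clean + invalidate, e.g. before the
 *					// d-cache is turned off
 *	invalidate_dcache_all();	// discard only, e.g. before the d-cache
 *					// is first enabled
 *
 * invalidate_dcache_all() must only be used when no dirty lines need to
 * survive, since "dc isw" throws their contents away.
 */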

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
	isb
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	isb
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection
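
/*
 * With ctr_el0.DminLine = 4, for example, the line size computed above is
 * 4 << 4 = 64 bytes.  Typical use from C is via the flush_dcache_range()
 * wrapper (assumed to sit in cache_v8.c in this tree), e.g. before a device
 * reads a buffer the CPU has just written; buf and len are placeholder
 * names:
 *
 *	flush_dcache_range((unsigned long)buf,
 *			   (unsigned long)buf + len);
 *
 * The start address is rounded down to a line boundary above, but buffers
 * are best allocated cache-line aligned so unrelated data never shares a
 * line with the range being maintained.
 */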
/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfm	x3, x3, #16, #19
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	ivac, x0	/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	isb
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection
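
/*
 * Typical use from C is via the invalidate_dcache_range() wrapper (assumed),
 * e.g. after a device has DMA-written a buffer the CPU is about to read;
 * buf and len are placeholder names:
 *
 *	invalidate_dcache_range((unsigned long)buf,
 *				(unsigned long)buf + len);
 *
 * Because "dc ivac" discards whole lines, the buffer should start and end on
 * cache-line boundaries, otherwise unrelated data sharing the first or last
 * line can be lost.
 */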

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection
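
/*
 * From C this is normally reached via the invalidate_icache_all() wrapper
 * (assumed), used after new code has been written to memory, for instance
 * (dst and size are placeholder names):
 *
 *	flush_dcache_range(dst, dst + size);	// push the instructions out
 *	invalidate_icache_all();		// then drop stale icache lines
 *
 * "ic ialluis" invalidates the icache to the point of unification for the
 * inner shareable domain, so the d-cache clean has to happen first.
 */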

.pushsection .text.__asm_invalidate_l3_dcache, "ax"
ENTRY(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
	.weak	__asm_invalidate_l3_dcache
.popsection

.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
	.weak	__asm_flush_l3_dcache
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
	.weak	__asm_invalidate_l3_icache
.popsection
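
/*
 * The three stubs above simply report success and are declared weak: SoCs
 * with an architecturally invisible L3/system cache are expected to override
 * them in their platform code so that the flush/invalidate-all paths also
 * cover that cache.
 */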

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection
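
/*
 * Notes on the sequence above: CR_M, CR_C and CR_I are the SCTLR MMU,
 * d-cache and i-cache enable bits, so the movn/and pair clears all three
 * before the TTBR write and the saved SCTLR value re-enables them
 * afterwards.  Because translation is off in between, the caller has to be
 * executing from memory that is reachable at its physical address (in
 * U-Boot this normally means an identity-mapped region).
 */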