xref: /rk3399_rockchip-uboot/arch/arm/cpu/armv8/cache.S (revision 88590148fa8b7e2d7ca910a7a03b5c5700af58e4)
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
9
10#include <asm-offsets.h>
11#include <config.h>
12#include <version.h>
13#include <asm/macro.h>
14#include <linux/linkage.h>
15
/*
 * void __asm_flush_dcache_level(level)
 *
 * Clean and/or invalidate one level of data/unified cache by set/way,
 * walking every set and every way of the level selected in x0.
 *
 * x0: cache level (0-based, as encoded in CLIDR_EL1)
 * x1: 0 flush & invalidate, 1 invalidate only (only bit 0 is tested)
 * x2~x9, x12: clobbered
 */
ENTRY(__asm_flush_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level (data/unified) */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	and	x2, x6, #7		/* x2 <- log2(cache line size)-4 */
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	mov	x3, #0x3ff
	and	x3, x3, x6, lsr #3	/* x3 <- max number of #ways */
	add	w4, w3, w3
	sub	w4, w4, 1		/* round up log2(#ways + 1) */
	clz	w5, w4			/* bit position of #ways in DC x,SW arg */
	mov	x4, #0x7fff
	and	x4, x4, x6, lsr #13	/* x4 <- max number of #sets */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f		/* bit 0 clear -> clean & invalidate */
	dc	isw, x9			/* invalidate only by set/way */
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_flush_dcache_level)
63
/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 flush & invalidate, 1 invalidate only (bit 0 is what matters)
 *
 * Clean and/or invalidate all data cache levels, by set/way, up to the
 * Level of Coherency from CLIDR_EL1.  Preserves lr in x15 across the
 * calls to __asm_flush_dcache_level (no stack use this early in boot).
 *
 * Clobbers: x0~x9, x10~x12, x15
 */
ENTRY(__asm_dcache_all)
	mov	x1, x0			/* x1 <- invalidate_only flag */
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	lsr	x11, x10, #24
	and	x11, x11, #0x7		/* x11 <- loc (Level of Coherency) */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr			/* save return address; bl clobbers lr */
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

loop_level:
	lsl	x12, x0, #1
	add	x12, x12, x0		/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type of this level */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache only */
	bl	__asm_flush_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15			/* restore original return address */

finished:
	ret
ENDPROC(__asm_dcache_all)
107
/*
 * void __asm_flush_dcache_all(void)
 *
 * Clean & invalidate all data cache levels by set/way: thin wrapper
 * around __asm_dcache_all with the invalidate_only flag cleared.
 * The return address is stashed in x16 because lr is clobbered by the
 * bl and no stack is available this early in boot.
 */
ENTRY(__asm_flush_dcache_all)
	mov	x0, xzr			/* 0: clean & invalidate */
	mov	x16, lr			/* save lr across the call below */
	bl	__asm_dcache_all
	mov	lr, x16			/* restore original return address */
	ret
ENDPROC(__asm_flush_dcache_all)
115
/*
 * void __asm_invalidate_dcache_all(void)
 *
 * Invalidate (without cleaning) all data cache levels by set/way: thin
 * wrapper around __asm_dcache_all with bit 0 of the flag set.
 * The return address is stashed in x16 because lr is clobbered by the
 * bl and no stack is available this early in boot.
 */
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0xffff		/* bit 0 set: invalidate only */
	mov	x16, lr			/* save lr across the call below */
	bl	__asm_dcache_all
	mov	lr, x16			/* restore original return address */
	ret
ENDPROC(__asm_invalidate_dcache_all)
123
/*
 * void __asm_flush_dcache_range(start, end)
 *
 * Clean & invalidate data cache lines covering [start, end).
 * start is rounded down to a cache-line boundary; assumes end > start
 * (the loop body runs at least once before the bound check).
 *
 * x0: start address
 * x1: end address (exclusive)
 * x2, x3: clobbered
 */
ENTRY(__asm_flush_dcache_range)
	mrs	x3, ctr_el0
	lsr	x3, x3, #16
	and	x3, x3, #0xf		/* x3 <- CTR_EL0.DminLine: log2(line/4) */
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3		/* align start down to a line boundary */
1:	dc	civac, x0	/* clean & invalidate data or unified cache */
	add	x0, x0, x2		/* advance one cache line */
	cmp	x0, x1
	b.lo	1b			/* unsigned: loop while x0 < end */
	dsb	sy			/* complete maintenance before return */
	ret
ENDPROC(__asm_flush_dcache_range)
149
/*
 * void __asm_invalidate_icache_all(void)
 *
 * Invalidate the entire instruction cache (to the Point of Unification,
 * Inner Shareable domain).  Note: this is icache maintenance, not TLB
 * invalidation, despite what the comment previously said.
 */
ENTRY(__asm_invalidate_icache_all)
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
160