xref: /rk3399_ARM-atf/lib/aarch64/cache_helpers.S (revision 5c633bdff3f23c00fcfb91c26b709e1b66b84d21)
/*
 * Copyright (c) 2013-2014, ARM Limited and Contributors. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 *
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * Neither the name of ARM nor the names of its contributors may be used
 * to endorse or promote products derived from this software without specific
 * prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch.h>
#include <asm_macros.S>

	.globl	flush_dcache_range
	.globl	inv_dcache_range
	.globl	dcsw_op_louis
	.globl	dcsw_op_all

	/* ------------------------------------------
	 * Clean and invalidate the data cache for the
	 * memory range [x0, x0 + x1).
	 * 'x0' = base address, 'x1' = size in bytes
	 * (see the usage sketch after this function)
	 * ------------------------------------------
	 */
func flush_dcache_range
	dcache_line_size x2, x3		// x2 = minimum D-cache line size
	add	x1, x0, x1		// x1 = end address
	sub	x3, x2, #1		// x3 = line size - 1 (alignment mask)
	bic	x0, x0, x3		// align base address down to a line boundary
flush_loop:
	dc	civac, x0		// clean+invalidate line by VA to PoC
	add	x0, x0, x2		// advance to the next line
	cmp	x0, x1
	b.lo	flush_loop		// loop until the end address is reached
	dsb	sy			// ensure completion of the maintenance ops
	ret

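	/* ------------------------------------------
	 * Usage sketch (illustrative, not part of
	 * this file): callers normally reach this
	 * routine through a C prototype along the
	 * lines of
	 *   void flush_dcache_range(unsigned long addr,
	 *                           unsigned long size);
	 * e.g. to push a buffer to memory before a
	 * non-coherent device reads it:
	 *   flush_dcache_range((unsigned long)buf, len);
	 * 'buf' and 'len' are hypothetical names and
	 * the exact prototype is an assumption.
	 * ------------------------------------------
	 */
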

	/* ------------------------------------------
	 * Invalidate the data cache for the memory
	 * range [x0, x0 + x1) without cleaning it.
	 * 'x0' = base address, 'x1' = size in bytes
	 * ------------------------------------------
	 */
func inv_dcache_range
	dcache_line_size x2, x3		// x2 = minimum D-cache line size
	add	x1, x0, x1		// x1 = end address
	sub	x3, x2, #1		// x3 = line size - 1 (alignment mask)
	bic	x0, x0, x3		// align base address down to a line boundary
inv_loop:
	dc	ivac, x0		// invalidate line by VA to PoC
	add	x0, x0, x2		// advance to the next line
	cmp	x0, x1
	b.lo	inv_loop		// loop until the end address is reached
	dsb	sy			// ensure completion of the maintenance ops
	ret

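	/* ------------------------------------------
	 * Usage sketch (illustrative, not part of
	 * this file): with an assumed prototype of
	 *   void inv_dcache_range(unsigned long addr,
	 *                         unsigned long size);
	 * a caller might discard stale lines before
	 * reading data that a device wrote directly
	 * to memory:
	 *   inv_dcache_range((unsigned long)rx_buf, len);
	 * 'rx_buf' and 'len' are hypothetical names.
	 * Note that invalidation discards any dirty
	 * data held in the affected lines.
	 * ------------------------------------------
	 */
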

	/* ---------------------------------------------------------------
	 * Data cache operations by set/way up to the level specified.
	 *
	 * The main function, do_dcsw_op, requires:
	 * x0: the operation type (0-2), as defined in arch.h
	 * x3: the cache level to stop at, in CSSELR format (level << 1)
	 * x9: clidr_el1
	 * and carries out the operation on each data cache level, from
	 * level 0 up to (but not including) the level encoded in x3.
	 *
	 * The dcsw_op macro sets up the x3 and x9 parameters from the
	 * clidr_el1 cache information before branching to the main
	 * function (an expansion example follows the macro below).
	 * ---------------------------------------------------------------
	 */

	.macro	dcsw_op shift, fw, ls
	mrs	x9, clidr_el1			// read the cache level ID register
	ubfx	x3, x9, \shift, \fw		// extract the requested level field
	lsl	x3, x3, \ls			// convert it to CSSELR (level << 1) format
	b	do_dcsw_op
	.endm
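	/* ---------------------------------------------------------------
	 * Expansion example (illustrative): assuming the arch.h values
	 * LOC_SHIFT == 24, CLIDR_FIELD_WIDTH == 3 and LEVEL_SHIFT == 1
	 * (CLIDR_EL1.LoC lives in bits [26:24]), dcsw_op_all expands to:
	 *   mrs  x9, clidr_el1
	 *   ubfx x3, x9, #24, #3		// x3 = Level of Coherence
	 *   lsl  x3, x3, #1			// x3 = LoC << 1 (CSSELR format)
	 *   b    do_dcsw_op
	 * so do_dcsw_op walks cache levels 0 to LoC-1.
	 * ---------------------------------------------------------------
	 */
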

func do_dcsw_op
	cbz	x3, exit		// nothing to do if x3 is zero
	mov	x10, xzr		// x10 = cache level selector (level << 1)
	adr	x14, dcsw_loop_table	// compute inner loop address
	add	x14, x14, x0, lsl #5	// inner loop is 8x32-bit instructions
	mov	x0, x9
	mov	w8, #1
loop1:
	add	x2, x10, x10, lsr #1	// work out 3x current cache level
	lsr	x1, x0, x2		// extract cache type bits from clidr
	and	x1, x1, #7		// mask the bits for current cache only
	cmp	x1, #2			// see what cache we have at this level
	b.lt	level_done		// nothing to do if no cache or icache

	msr	csselr_el1, x10		// select current cache level in csselr
	isb				// isb to sync the new csselr and ccsidr
	mrs	x1, ccsidr_el1		// read the new ccsidr
	and	x2, x1, #7		// extract the length of the cache lines
	add	x2, x2, #4		// add 4 (line length offset)
	ubfx	x4, x1, #3, #10		// maximum way number
	clz	w5, w4			// bit position of way size increment
	lsl	w9, w4, w5		// w9 = aligned max way number
	lsl	w16, w8, w5		// w16 = way number loop decrement
	orr	w9, w10, w9		// w9 = combine way and cache number
	ubfx	w6, w1, #13, #15	// w6 = max set number
	lsl	w17, w8, w2		// w17 = set number loop decrement
	dsb	sy			// barrier before we start this level
	br	x14			// jump to DC operation specific loop

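	/* ---------------------------------------------------------------
	 * Worked example (illustrative): for a hypothetical level with
	 * 64-byte lines, 4 ways and 256 sets:
	 *   x2  = 2 + 4 = 6			(log2 of the line length in bytes)
	 *   w4  = 3, w5 = clz(3) = 30		(way field lives in bits [31:30])
	 *   w9  = 3 << 30, w16 = 1 << 30
	 *   w6  = 255, w17 = 1 << 6		(set field lives in bits [13:6])
	 * so each DC operand w11 = way | set | (level << 1), which is the
	 * architectural set/way format expected by DC ISW/CSW/CISW.
	 * ---------------------------------------------------------------
	 */
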
	.macro	dcsw_loop _op
loop2_\_op:
	lsl	w7, w6, w2		// w7 = aligned max set number

loop3_\_op:
	orr	w11, w9, w7		// combine cache, way and set number
	dc	\_op, x11
	subs	w7, w7, w17		// decrement set number
	b.ge	loop3_\_op

	subs	x9, x9, x16		// decrement way number
	b.ge	loop2_\_op

	b	level_done
	.endm

level_done:
	add	x10, x10, #2		// increment cache number
	cmp	x3, x10
	b.gt	loop1
	msr	csselr_el1, xzr		// select cache level 0 in csselr
	dsb	sy			// barrier to complete final cache operation
	isb
exit:
	ret

	/*
	 * Each dcsw_loop expansion below is 8 instructions (32 bytes), which
	 * is what the 'lsl #5' indexing into this table relies on. The order
	 * follows the operation type value passed in x0.
	 */
dcsw_loop_table:
	dcsw_loop isw			// x0 = 0: invalidate by set/way
	dcsw_loop cisw			// x0 = 1: clean and invalidate by set/way
	dcsw_loop csw			// x0 = 2: clean by set/way


	/* Perform the data cache operation up to the Level of Unification
	 * Inner Shareable. 'x0' = operation type, as for do_dcsw_op.
	 */
func dcsw_op_louis
	dcsw_op #LOUIS_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT

	/* Perform the data cache operation up to the Level of Coherence.
	 * 'x0' = operation type, as for do_dcsw_op.
	 */
func dcsw_op_all
	dcsw_op #LOC_SHIFT, #CLIDR_FIELD_WIDTH, #LEVEL_SHIFT
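
	/* ---------------------------------------------------------------
	 * Usage sketch (illustrative, not part of this file): from C these
	 * entry points would typically be reached through prototypes such as
	 *   void dcsw_op_louis(unsigned int op);
	 *   void dcsw_op_all(unsigned int op);
	 * where 'op' is one of the operation type values (0-2) from arch.h,
	 * e.g. a clean+invalidate of all levels before the data cache is
	 * disabled. The exact prototype names and types are assumptions.
	 * ---------------------------------------------------------------
	 */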
166