xref: /rk3399_rockchip-uboot/arch/arm/mach-uniphier/arm32/cache-uniphier.c (revision 6f579db75411973200224307d6a84d82fc01bb96)
1 /*
2  * Copyright (C) 2012-2014 Panasonic Corporation
3  * Copyright (C) 2015-2016 Socionext Inc.
4  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
5  *
6  * SPDX-License-Identifier:	GPL-2.0+
7  */
8 
9 #include <common.h>
10 #include <linux/io.h>
11 #include <linux/kernel.h>
12 #include <asm/armv7.h>
13 #include <asm/processor.h>
14 
15 #include "cache-uniphier.h"
16 #include "ssc-regs.h"
17 
/*
 * Predicates on the operation word written to UNIPHIER_SSCOQM.
 * The macro argument is fully parenthesized so that operation words
 * built from OR-ed flags (e.g. "A | B") expand with the intended
 * precedence: "&" binds more loosely than "|"-free subexpressions,
 * and "==" binds tighter than "&".
 */
#define UNIPHIER_SSCOQAD_IS_NEEDED(op) \
		((((op) & UNIPHIER_SSCOQM_S_MASK)) == UNIPHIER_SSCOQM_S_RANGE)
#define UNIPHIER_SSCOQWM_IS_NEEDED(op) \
		((((op) & UNIPHIER_SSCOQM_TID_MASK)) == UNIPHIER_SSCOQM_TID_WAY)
22 
/*
 * uniphier_cache_sync - perform a sync point for the outer cache
 *
 * Writes the SYNC command to the operation entry register, then reads
 * the same register back so the posted write is known to have reached
 * the controller before the caller proceeds.
 */
static void uniphier_cache_sync(void)
{
	/* drain internal buffers */
	writel(UNIPHIER_SSCOPE_CM_SYNC, UNIPHIER_SSCOPE);
	/* need a read back to confirm */
	readl(UNIPHIER_SSCOPE);
}
31 
32 /**
33  * uniphier_cache_maint_common - run a queue operation
34  *
35  * @start: start address of range operation (don't care for "all" operation)
36  * @size: data size of range operation (don't care for "all" operation)
37  * @ways: target ways (don't care for operations other than pre-fetch, touch
38  * @operation: flags to specify the desired cache operation
39  */
40 static void uniphier_cache_maint_common(u32 start, u32 size, u32 ways,
41 					u32 operation)
42 {
43 	/* clear the complete notification flag */
44 	writel(UNIPHIER_SSCOLPQS_EF, UNIPHIER_SSCOLPQS);
45 
46 	do {
47 		/* set cache operation */
48 		writel(UNIPHIER_SSCOQM_CE | operation, UNIPHIER_SSCOQM);
49 
50 		/* set address range if needed */
51 		if (likely(UNIPHIER_SSCOQAD_IS_NEEDED(operation))) {
52 			writel(start, UNIPHIER_SSCOQAD);
53 			writel(size, UNIPHIER_SSCOQSZ);
54 		}
55 
56 		/* set target ways if needed */
57 		if (unlikely(UNIPHIER_SSCOQWM_IS_NEEDED(operation)))
58 			writel(ways, UNIPHIER_SSCOQWN);
59 	} while (unlikely(readl(UNIPHIER_SSCOPPQSEF) &
60 			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));
61 
62 	/* wait until the operation is completed */
63 	while (likely(readl(UNIPHIER_SSCOLPQS) != UNIPHIER_SSCOLPQS_EF))
64 		cpu_relax();
65 }
66 
/*
 * uniphier_cache_maint_all - run a queue operation on the whole cache
 * @operation: flags specifying the desired cache operation
 *
 * Adds the "all range" selector to @operation, runs it, and issues a
 * sync point afterwards.
 */
static void uniphier_cache_maint_all(u32 operation)
{
	uniphier_cache_maint_common(0, 0, 0, UNIPHIER_SSCOQM_S_ALL | operation);

	uniphier_cache_sync();
}
73 
74 static void uniphier_cache_maint_range(u32 start, u32 end, u32 ways,
75 				       u32 operation)
76 {
77 	u32 size;
78 
79 	/*
80 	 * If the start address is not aligned,
81 	 * perform a cache operation for the first cache-line
82 	 */
83 	start = start & ~(UNIPHIER_SSC_LINE_SIZE - 1);
84 
85 	size = end - start;
86 
87 	if (unlikely(size >= (u32)(-UNIPHIER_SSC_LINE_SIZE))) {
88 		/* this means cache operation for all range */
89 		uniphier_cache_maint_all(operation);
90 		return;
91 	}
92 
93 	/*
94 	 * If the end address is not aligned,
95 	 * perform a cache operation for the last cache-line
96 	 */
97 	size = ALIGN(size, UNIPHIER_SSC_LINE_SIZE);
98 
99 	while (size) {
100 		u32 chunk_size = min_t(u32, size, UNIPHIER_SSC_RANGE_OP_MAX_SIZE);
101 
102 		uniphier_cache_maint_common(start, chunk_size, ways,
103 					    UNIPHIER_SSCOQM_S_RANGE | operation);
104 
105 		start += chunk_size;
106 		size -= chunk_size;
107 	}
108 
109 	uniphier_cache_sync();
110 }
111 
112 void uniphier_cache_prefetch_range(u32 start, u32 end, u32 ways)
113 {
114 	uniphier_cache_maint_range(start, end, ways,
115 				   UNIPHIER_SSCOQM_TID_WAY |
116 				   UNIPHIER_SSCOQM_CM_PREFETCH);
117 }
118 
119 void uniphier_cache_touch_range(u32 start, u32 end, u32 ways)
120 {
121 	uniphier_cache_maint_range(start, end, ways,
122 				   UNIPHIER_SSCOQM_TID_WAY |
123 				   UNIPHIER_SSCOQM_CM_TOUCH);
124 }
125 
126 void uniphier_cache_touch_zero_range(u32 start, u32 end, u32 ways)
127 {
128 	uniphier_cache_maint_range(start, end, ways,
129 				   UNIPHIER_SSCOQM_TID_WAY |
130 				   UNIPHIER_SSCOQM_CM_TOUCH_ZERO);
131 }
132 
133 static void uniphier_cache_endisable(int enable)
134 {
135 	u32 tmp;
136 
137 	tmp = readl(UNIPHIER_SSCC);
138 	if (enable)
139 		tmp |= UNIPHIER_SSCC_ON;
140 	else
141 		tmp &= ~UNIPHIER_SSCC_ON;
142 	writel(tmp, UNIPHIER_SSCC);
143 }
144 
/* Turn the outer cache on. */
void uniphier_cache_enable(void)
{
	uniphier_cache_endisable(1);
}
149 
/* Turn the outer cache off. */
void uniphier_cache_disable(void)
{
	uniphier_cache_endisable(0);
}
154 
155 #ifdef CONFIG_UNIPHIER_L2CACHE_ON
/* ARMv7 outer-cache hook: run the FLUSH operation on the whole cache. */
void v7_outer_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}
160 
/* ARMv7 outer-cache hook: run the INV operation on the whole cache. */
void v7_outer_cache_inval_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}
165 
/* ARMv7 outer-cache hook: run the FLUSH operation on [start, end). */
void v7_outer_cache_flush_range(u32 start, u32 end)
{
	uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_FLUSH);
}
170 
171 void v7_outer_cache_inval_range(u32 start, u32 end)
172 {
173 	if (start & (UNIPHIER_SSC_LINE_SIZE - 1)) {
174 		start &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
175 		uniphier_cache_maint_range(start, UNIPHIER_SSC_LINE_SIZE, 0,
176 					   UNIPHIER_SSCOQM_CM_FLUSH);
177 		start += UNIPHIER_SSC_LINE_SIZE;
178 	}
179 
180 	if (start >= end) {
181 		uniphier_cache_sync();
182 		return;
183 	}
184 
185 	if (end & (UNIPHIER_SSC_LINE_SIZE - 1)) {
186 		end &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
187 		uniphier_cache_maint_range(end, UNIPHIER_SSC_LINE_SIZE, 0,
188 					   UNIPHIER_SSCOQM_CM_FLUSH);
189 	}
190 
191 	if (start >= end) {
192 		uniphier_cache_sync();
193 		return;
194 	}
195 
196 	uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_INV);
197 }
198 
/*
 * ARMv7 outer-cache hook: enable the outer cache.
 * All ways are activated first so the enabled cache has its full
 * capacity available.
 */
void v7_outer_cache_enable(void)
{
	writel(U32_MAX, UNIPHIER_SSCLPDAWCR);	/* activate all ways */
	uniphier_cache_enable();
}
204 
/* ARMv7 outer-cache hook: disable the outer cache. */
void v7_outer_cache_disable(void)
{
	uniphier_cache_disable();
}
209 #endif
210 
/*
 * U-Boot board hook: enable the data cache.
 * NOTE(review): the outer cache is presumably switched on via the
 * v7_outer_cache_enable() hook from generic dcache code when
 * CONFIG_UNIPHIER_L2CACHE_ON is set — confirm against the caller.
 */
void enable_caches(void)
{
	dcache_enable();
}
215