xref: /rk3399_rockchip-uboot/arch/arm/mach-uniphier/arm32/cache-uniphier.c (revision 95a1feca2e852ade552495e3688c5ef2afae68aa)
1 /*
2  * Copyright (C) 2012-2014 Panasonic Corporation
3  * Copyright (C) 2015-2016 Socionext Inc.
4  *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
5  *
6  * SPDX-License-Identifier:	GPL-2.0+
7  */
8 
9 #include <common.h>
10 #include <linux/io.h>
11 #include <linux/kernel.h>
12 #include <asm/armv7.h>
13 #include <asm/processor.h>
14 
15 #include "cache-uniphier.h"
16 #include "ssc-regs.h"
17 
18 #define UNIPHIER_SSCOQAD_IS_NEEDED(op) \
19 		((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)
20 #define UNIPHIER_SSCOQWM_IS_NEEDED(op) \
21 		((op & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY)
22 
/*
 * uniphier_cache_sync - perform a sync point for the outer cache
 *
 * Drains any cache operations still buffered inside the controller so that
 * previously queued maintenance commands have fully taken effect.
 */
static void uniphier_cache_sync(void)
{
	/* drain internal buffers */
	writel(UNIPHIER_SSCOPE_CM_SYNC, UNIPHIER_SSCOPE);
	/* need a read back to confirm the write reached the controller */
	readl(UNIPHIER_SSCOPE);
}
31 
/**
 * uniphier_cache_maint_common - run a single queue operation
 *
 * Posts one command to the cache controller's operation queue, retrying
 * until the controller accepts it, then busy-waits for completion.
 *
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @ways: target ways (don't care for operations other than pre-fetch, touch)
 * @operation: flags to specify the desired cache operation
 */
static void uniphier_cache_maint_common(u32 start, u32 size, u32 ways,
					u32 operation)
{
	/* clear the complete notification flag */
	writel(UNIPHIER_SSCOLPQS_EF, UNIPHIER_SSCOLPQS);

	do {
		/* set cache operation */
		writel(UNIPHIER_SSCOQM_CE | operation, UNIPHIER_SSCOQM);

		/* set address range if needed (range ops only) */
		if (likely(UNIPHIER_SSCOQAD_IS_NEEDED(operation))) {
			writel(start, UNIPHIER_SSCOQAD);
			writel(size, UNIPHIER_SSCOQSZ);
		}

		/* set target ways if needed (way-targeted ops only) */
		if (unlikely(UNIPHIER_SSCOQWM_IS_NEEDED(operation)))
			writel(ways, UNIPHIER_SSCOQWN);

		/*
		 * Re-issue the whole command if the controller flags an
		 * error for it (FE/OE status bits — presumably format/
		 * overflow errors; confirm against the SSC datasheet).
		 */
	} while (unlikely(readl(UNIPHIER_SSCOPPQSEF) &
			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

	/* wait until the operation is completed */
	while (likely(readl(UNIPHIER_SSCOLPQS) != UNIPHIER_SSCOLPQS_EF))
		cpu_relax();
}
66 
67 static void uniphier_cache_maint_all(u32 operation)
68 {
69 	uniphier_cache_maint_common(0, 0, 0, UNIPHIER_SSCOQM_S_ALL | operation);
70 
71 	uniphier_cache_sync();
72 }
73 
/*
 * uniphier_cache_maint_range - run @operation against [start, end)
 *
 * The range is widened to cache-line granularity; a range too large for the
 * hardware range command falls back to an "all" operation, and larger ranges
 * are split into hardware-sized chunks.
 */
static void uniphier_cache_maint_range(u32 start, u32 end, u32 ways,
				       u32 operation)
{
	u32 size;

	/*
	 * If the start address is not aligned,
	 * perform a cache operation for the first cache-line
	 */
	start = start & ~(UNIPHIER_SSC_LINE_SIZE - 1);

	size = end - start;

	/*
	 * Unsigned compare also catches end < start (size wrapped around),
	 * which degenerates to a whole-cache operation.
	 */
	if (unlikely(size >= (u32)(-UNIPHIER_SSC_LINE_SIZE))) {
		/* this means cache operation for all range */
		uniphier_cache_maint_all(operation);
		return;
	}

	/*
	 * If the end address is not aligned,
	 * perform a cache operation for the last cache-line
	 */
	size = ALIGN(size, UNIPHIER_SSC_LINE_SIZE);

	/* split into chunks the hardware range command can handle */
	while (size) {
		u32 chunk_size = min_t(u32, size, UNIPHIER_SSC_RANGE_OP_MAX_SIZE);

		uniphier_cache_maint_common(start, chunk_size, ways,
					    UNIPHIER_SSCOQM_S_RANGE | operation);

		start += chunk_size;
		size -= chunk_size;
	}

	uniphier_cache_sync();
}
111 
112 void uniphier_cache_prefetch_range(u32 start, u32 end, u32 ways)
113 {
114 	uniphier_cache_maint_range(start, end, ways,
115 				   UNIPHIER_SSCOQM_TID_WAY |
116 				   UNIPHIER_SSCOQM_CM_PREFETCH);
117 }
118 
119 void uniphier_cache_touch_range(u32 start, u32 end, u32 ways)
120 {
121 	uniphier_cache_maint_range(start, end, ways,
122 				   UNIPHIER_SSCOQM_TID_WAY |
123 				   UNIPHIER_SSCOQM_CM_TOUCH);
124 }
125 
126 void uniphier_cache_touch_zero_range(u32 start, u32 end, u32 ways)
127 {
128 	uniphier_cache_maint_range(start, end, ways,
129 				   UNIPHIER_SSCOQM_TID_WAY |
130 				   UNIPHIER_SSCOQM_CM_TOUCH_ZERO);
131 }
132 
133 #ifdef CONFIG_UNIPHIER_L2CACHE_ON
/* flush (write back + invalidate) the entire outer cache */
void v7_outer_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}
138 
/* invalidate the entire outer cache without writing dirty data back */
void v7_outer_cache_inval_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}
143 
/* flush (write back + invalidate) the outer cache for [start, end) */
void v7_outer_cache_flush_range(u32 start, u32 end)
{
	uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_FLUSH);
}
148 
149 void v7_outer_cache_inval_range(u32 start, u32 end)
150 {
151 	if (start & (UNIPHIER_SSC_LINE_SIZE - 1)) {
152 		start &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
153 		uniphier_cache_maint_range(start, UNIPHIER_SSC_LINE_SIZE, 0,
154 					   UNIPHIER_SSCOQM_CM_FLUSH);
155 		start += UNIPHIER_SSC_LINE_SIZE;
156 	}
157 
158 	if (start >= end) {
159 		uniphier_cache_sync();
160 		return;
161 	}
162 
163 	if (end & (UNIPHIER_SSC_LINE_SIZE - 1)) {
164 		end &= ~(UNIPHIER_SSC_LINE_SIZE - 1);
165 		uniphier_cache_maint_range(end, UNIPHIER_SSC_LINE_SIZE, 0,
166 					   UNIPHIER_SSCOQM_CM_FLUSH);
167 	}
168 
169 	if (start >= end) {
170 		uniphier_cache_sync();
171 		return;
172 	}
173 
174 	uniphier_cache_maint_range(start, end, 0, UNIPHIER_SSCOQM_CM_INV);
175 }
176 
177 void v7_outer_cache_enable(void)
178 {
179 	u32 tmp;
180 
181 	writel(U32_MAX, UNIPHIER_SSCLPDAWCR);	/* activate all ways */
182 	tmp = readl(UNIPHIER_SSCC);
183 	tmp |= UNIPHIER_SSCC_ON;
184 	writel(tmp, UNIPHIER_SSCC);
185 }
186 #endif
187 
188 void v7_outer_cache_disable(void)
189 {
190 	u32 tmp;
191 
192 	tmp = readl(UNIPHIER_SSCC);
193 	tmp &= ~UNIPHIER_SSCC_ON;
194 	writel(tmp, UNIPHIER_SSCC);
195 }
196 
/* U-Boot hook: enable the (inner) data cache during boot */
void enable_caches(void)
{
	dcache_enable();
}
201