/* arch/arm/lib/cache-cp15.c (revision 827e2ae92e2103f82dab5b54228ad24e40db6263) */
/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <linux/compiler.h>

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))

DECLARE_GLOBAL_DATA_PTR;

__weak void arm_init_before_mmu(void)
{
}

__weak void arm_init_domains(void)
{
}

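/*
 * set_section_dcache() - set the cache attributes of one MMU section
 *
 * Writes a single first-level page table entry for @section, combining the
 * section base address, the access permission bits and the cache attributes
 * selected by @option.
 */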
void set_section_dcache(int section, enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	/* Need to set the access flag to not fault */
	u64 value = TTB_SECT_AP | TTB_SECT_AF;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	u32 value = TTB_SECT_AP;
#endif

	/* Add the section base address */
	value |= ((u32)section << MMU_SECTION_SHIFT);

	/* Add caching bits */
	value |= option;

	/* Set PTE */
	page_table[section] = value;
}

__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}

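/*
 * mmu_set_region_dcache_behaviour() - set cache attributes for a region
 *
 * Rounds [start, start + size) up to whole MMU sections, rewrites the
 * corresponding page table entries with @option and flushes the modified
 * part of the page table.
 *
 * Illustrative use only (fb_base/fb_size are hypothetical names): a board
 * could map a framebuffer uncached with
 *
 *	mmu_set_region_dcache_behaviour(fb_base, fb_size, DCACHE_OFF);
 */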
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
#endif
	unsigned long startpt, stoppt;
	unsigned long upto, end;

	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
#ifdef CONFIG_ARMV7_LPAE
	debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size,
	      (u64)option);
#else
	debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size,
	      option);
#endif
	for (upto = start; upto < end; upto++)
		set_section_dcache(upto, option);

	/*
	 * Make sure the range is cache-line aligned. Only the CPU maintains
	 * the page tables, so it is safe to always flush complete cache
	 * lines.
	 */
	startpt = (unsigned long)&page_table[start];
	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
	stoppt = (unsigned long)&page_table[end];
	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
	mmu_page_table_flush(startpt, stoppt);
}

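/*
 * dram_bank_mmu_setup() - mark one DRAM bank as cached
 *
 * Walks every MMU section covered by bd->bi_dram[bank] and applies the
 * cache policy selected at configuration time (write-through,
 * write-allocate or write-back).
 */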
__weak void dram_bank_mmu_setup(int bank)
{
	bd_t *bd = gd->bd;
	int	i;

	debug("%s: bank: %d\n", __func__, bank);
	for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
	     i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
		 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
	     i++) {
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
		set_section_dcache(i, DCACHE_WRITETHROUGH);
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
		set_section_dcache(i, DCACHE_WRITEALLOC);
#else
		set_section_dcache(i, DCACHE_WRITEBACK);
#endif
	}
}

/*
 * To activate the MMU we need to set up virtual memory: use section-sized
 * areas (1 MB, or 2 MB with LPAE).
 */
static inline void mmu_setup(void)
{
	int i;
	u32 reg;

#ifndef CONFIG_SPL_BUILD
	/*
	 * In SPL neither the bootrom nor the DDR init code has enabled the
	 * dcache, so arm_init_before_mmu() is skipped there to save boot
	 * time.
	 */
	arm_init_before_mmu();
#endif
	/* Set up an identity-mapping for all 4GB, rw for everyone */
	for (i = 0; i < ((4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT); i++)
		set_section_dcache(i, DCACHE_OFF);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++)
		dram_bank_mmu_setup(i);

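	/*
	 * With LPAE, the block entries written by set_section_dcache() fill
	 * four 4 KiB second-level tables starting at gd->arch.tlb_addr (one
	 * table per GiB, one 2 MiB block per entry); the four-entry
	 * first-level table set up below lives at gd->arch.tlb_addr + 16 KiB.
	 */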
#if defined(CONFIG_ARMV7_LPAE) && __LINUX_ARM_ARCH__ != 4
	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
	for (i = 0; i < 4; i++) {
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);
		page_table[i] = tpt | TTB_PAGETABLE;
	}

	reg = TTBCR_EAE;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif

	if (is_hyp()) {
		/* Set HTCR to enable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set HTTBR */
		asm volatile("mcrr p15, 4, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set HMAIR */
		asm volatile("mcr p15, 4, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		/* Set TTBCR to enable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set 64-bit TTBR0 */
		asm volatile("mcrr p15, 0, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set MAIR */
		asm volatile("mcr p15, 0, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	}
#elif defined(CONFIG_CPU_V7)
	if (is_hyp()) {
		/* Set HTCR to disable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	} else {
		/* Set TTBCR to disable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	}
	/* Set TTBR0 */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (reg) : "memory");
#else
	/* Copy the page table address to cp15 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (gd->arch.tlb_addr) : "memory");
#endif
	/* Set the access control to all-supervisor */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		     : : "r" (~0));

	arm_init_domains();

	/* and enable the mmu */
	reg = get_cr();	/* get control reg. */
	set_cr(reg | CR_M);
}

static int mmu_enabled(void)
{
	return get_cr() & CR_M;
}

/* cache_bit must be either CR_I or CR_C */
static void cache_enable(uint32_t cache_bit)
{
	uint32_t reg;

	/* The data cache is not active unless the mmu is enabled too */
	if ((cache_bit == CR_C) && !mmu_enabled())
		mmu_setup();
	reg = get_cr();	/* get control reg. */
	set_cr(reg | cache_bit);
}

/* cache_bit must be either CR_I or CR_C */
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();

	if (cache_bit == CR_C) {
		/* if cache isn't enabled no need to disable */
		if ((reg & CR_C) != CR_C)
			return;
		/* if disabling data cache, disable mmu too */
		cache_bit |= CR_M;
	}
	reg = get_cr();

	if (cache_bit == (CR_C | CR_M))
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
#endif

#ifdef CONFIG_SYS_ICACHE_OFF
void icache_enable(void)
{
	return;
}

void icache_disable(void)
{
	return;
}

int icache_status(void)
{
	return 0;					/* always off */
}
#else
void icache_enable(void)
{
	cache_enable(CR_I);
}

void icache_disable(void)
{
	cache_disable(CR_I);
}

int icache_status(void)
{
	return (get_cr() & CR_I) != 0;
}
#endif

#ifdef CONFIG_SYS_DCACHE_OFF
void dcache_enable(void)
{
	return;
}

void dcache_disable(void)
{
	return;
}

int dcache_status(void)
{
	return 0;					/* always off */
}
#else
void dcache_enable(void)
{
	cache_enable(CR_C);
}

void dcache_disable(void)
{
	cache_disable(CR_C);
}

int dcache_status(void)
{
	return (get_cr() & CR_C) != 0;
}
#endif
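
/*
 * Usage sketch (illustrative, not part of this file): boards typically turn
 * the caches on from their enable_caches() hook, e.g.
 *
 *	void enable_caches(void)
 *	{
 *		icache_enable();
 *		dcache_enable();
 *	}
 *
 * dcache_enable() calls mmu_setup() the first time the data cache is
 * enabled, so no separate MMU initialisation is required.
 */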