/*
 * (C) Copyright 2002
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <linux/compiler.h>

#if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))

DECLARE_GLOBAL_DATA_PTR;

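/*
 * Weak hook run before the page tables are written; boards that already
 * run with caches on override this to flush or invalidate them first.
 */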
__weak void arm_init_before_mmu(void)
{
}

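/*
 * Weak hook for boards that need a different domain access setup than
 * the all-supervisor default programmed below in mmu_setup().
 */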
__weak void arm_init_domains(void)
{
}

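/*
 * Write one section descriptor into the page table at gd->arch.tlb_addr.
 * Each entry identity-maps a single MMU section (2 MB blocks with LPAE,
 * 1 MB sections otherwise) with the cache attributes given in 'option'.
 */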
void set_section_dcache(int section, enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
	/* Need to set the access flag to not fault */
	u64 value = TTB_SECT_AP | TTB_SECT_AF;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
	u32 value = TTB_SECT_AP;
#endif

	/* Add the page offset */
	value |= ((u32)section << MMU_SECTION_SHIFT);

	/* Add caching bits */
	value |= option;

	/* Set PTE */
	page_table[section] = value;
}

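/*
 * Weak default that only warns: platforms running with the dcache on
 * must override this to clean the page-table lines to memory and
 * invalidate the TLB.
 */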
__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}

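/*
 * Change the cache attributes of an address range one section at a
 * time, then flush the touched page-table entries so the table walker
 * sees the update. An illustrative call (addresses are hypothetical):
 *
 *	mmu_set_region_dcache_behaviour(0x10000000, 0x00100000, DCACHE_OFF);
 */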
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
#endif
	unsigned long startpt, stoppt;
	unsigned long upto, end;

	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
#ifdef CONFIG_ARMV7_LPAE
	debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size,
	      option);
#else
	debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size,
	      option);
#endif
	for (upto = start; upto < end; upto++)
		set_section_dcache(upto, option);

	/*
	 * Make sure the range is cache-line aligned. Only the CPU
	 * maintains the page tables, so it is safe to always flush
	 * complete cache lines.
	 */
	startpt = (unsigned long)&page_table[start];
	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
	stoppt = (unsigned long)&page_table[end];
	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
	mmu_page_table_flush(startpt, stoppt);
}

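/*
 * Weak default DRAM mapping: mark every section of the bank cacheable,
 * using the write policy selected at build time.
 */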
__weak void dram_bank_mmu_setup(int bank)
{
	bd_t *bd = gd->bd;
	int	i;

	debug("%s: bank: %d\n", __func__, bank);
	for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
	     i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
		 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
	     i++) {
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
		set_section_dcache(i, DCACHE_WRITETHROUGH);
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
		set_section_dcache(i, DCACHE_WRITEALLOC);
#else
		set_section_dcache(i, DCACHE_WRITEBACK);
#endif
	}
}

/* to activate the MMU we need to set up virtual memory: use 1M areas */
static inline void mmu_setup(void)
{
	int i, end;
	u32 reg;

#ifndef CONFIG_SPL_BUILD
	/*
	 * The boot ROM and DDR init code do not enable the dcache, so
	 * this step can be skipped in SPL to save boot time.
	 */
	arm_init_before_mmu();
#endif

	/*
	 * SPL thunder-boot: map only the peripheral device region to
	 * save boot time.
	 */
#if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_KERNEL_BOOT) && \
    defined(CONFIG_PERIPH_DEVICE_START_ADDR)
	i = CONFIG_PERIPH_DEVICE_START_ADDR >> MMU_SECTION_SHIFT;
	end = CONFIG_PERIPH_DEVICE_END_ADDR >> MMU_SECTION_SHIFT;
#else
	i = 0;
	end = (4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT;
#endif
	/* Set up an identity mapping for all 4GB, rw for everyone */
	for (; i < end; i++)
		set_section_dcache(i, DCACHE_OFF);

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++)
		dram_bank_mmu_setup(i);

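	/*
	 * In the LPAE configuration below, the level-1 table (four 1 GB
	 * entries) sits at gd->arch.tlb_addr + 16 KB, directly above the
	 * four 4 KB tables that set_section_dcache() fills in.
	 */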
#if defined(CONFIG_ARMV7_LPAE) && __LINUX_ARM_ARCH__ != 4
	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
	for (i = 0; i < 4; i++) {
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);
		page_table[i] = tpt | TTB_PAGETABLE;
	}

	reg = TTBCR_EAE;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif

	if (is_hyp()) {
		/* Set HTCR to enable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set HTTBR0 */
		asm volatile("mcrr p15, 4, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set HMAIR */
		asm volatile("mcr p15, 4, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		/* Set TTBCR to enable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set 64-bit TTBR0 */
		asm volatile("mcrr p15, 0, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set MAIR */
		asm volatile("mcr p15, 0, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	}
#elif defined(CONFIG_CPU_V7)
	if (is_hyp()) {
		/* Set HTCR to disable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	} else {
		/* Set TTBCR to disable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	}
	/* Set TTBR0 */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (reg) : "memory");
#else
	/* Copy the page table address to cp15 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (gd->arch.tlb_addr) : "memory");
#endif
	/* Set the access control to all-supervisor */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		     : : "r" (~0));

	arm_init_domains();

	/* And enable the MMU */
	reg = get_cr();	/* get control reg. */
	set_cr(reg | CR_M);
}

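/* Report whether the MMU enable bit (CR_M) is set in the control register. */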
static int mmu_enabled(void)
{
	return get_cr() & CR_M;
}

/* cache_bit must be either CR_I or CR_C */
static void cache_enable(uint32_t cache_bit)
{
	uint32_t reg;

	/* The data cache is not active unless the MMU is enabled too */
	if ((cache_bit == CR_C) && !mmu_enabled())
		mmu_setup();
	reg = get_cr();	/* get control reg. */
	set_cr(reg | cache_bit);
}

/* cache_bit must be either CR_I or CR_C */
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();

	if (cache_bit == CR_C) {
		/* if the cache isn't enabled, there is nothing to disable */
		if ((reg & CR_C) != CR_C)
			return;
		/* if disabling the data cache, disable the MMU too */
		cache_bit |= CR_M;
	}
	reg = get_cr();

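	/*
	 * Flush while the dcache is still enabled so that dirty lines
	 * reach memory before the cache and MMU are switched off.
	 */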
	if (cache_bit == (CR_C | CR_M))
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
#endif

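/*
 * With CONFIG_SYS_ICACHE_OFF the icache hooks become no-ops and
 * icache_status() always reports the cache as off.
 */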
#ifdef CONFIG_SYS_ICACHE_OFF
void icache_enable(void)
{
	return;
}

void icache_disable(void)
{
	return;
}

int icache_status(void)
{
	return 0;					/* always off */
}
#else
void icache_enable(void)
{
	cache_enable(CR_I);
}

void icache_disable(void)
{
	cache_disable(CR_I);
}

int icache_status(void)
{
	return (get_cr() & CR_I) != 0;
}
#endif

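/*
 * Likewise for the dcache: with CONFIG_SYS_DCACHE_OFF these are no-ops;
 * otherwise they toggle CR_C (together with the MMU when needed) via
 * cache_enable()/cache_disable().
 */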
#ifdef CONFIG_SYS_DCACHE_OFF
void dcache_enable(void)
{
	return;
}

void dcache_disable(void)
{
	return;
}

int dcache_status(void)
{
	return 0;					/* always off */
}
#else
void dcache_enable(void)
{
	cache_enable(CR_C);
}

void dcache_disable(void)
{
	cache_disable(CR_C);
}

int dcache_status(void)
{
	return (get_cr() & CR_C) != 0;
}
#endif