xref: /OK3568_Linux_fs/u-boot/arch/arm/lib/cache-cp15.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * (C) Copyright 2002
3*4882a593Smuzhiyun  * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * SPDX-License-Identifier:	GPL-2.0+
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <common.h>
9*4882a593Smuzhiyun #include <asm/system.h>
10*4882a593Smuzhiyun #include <asm/cache.h>
11*4882a593Smuzhiyun #include <linux/compiler.h>
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #if !(defined(CONFIG_SYS_ICACHE_OFF) && defined(CONFIG_SYS_DCACHE_OFF))
14*4882a593Smuzhiyun 
15*4882a593Smuzhiyun DECLARE_GLOBAL_DATA_PTR;
16*4882a593Smuzhiyun 
/*
 * Weak default hook run before the page tables are written in mmu_setup().
 * Boards/SoCs override this to invalidate caches/TLBs or do other
 * preparation; the default does nothing.
 */
__weak void arm_init_before_mmu(void)
{
}
20*4882a593Smuzhiyun 
/*
 * Weak default hook run from mmu_setup() after DACR is programmed to
 * all-supervisor. Boards/SoCs override this to set up their own domain
 * access configuration; the default does nothing.
 */
__weak void arm_init_domains(void)
{
}
24*4882a593Smuzhiyun 
set_section_dcache(int section,enum dcache_option option)25*4882a593Smuzhiyun void set_section_dcache(int section, enum dcache_option option)
26*4882a593Smuzhiyun {
27*4882a593Smuzhiyun #ifdef CONFIG_ARMV7_LPAE
28*4882a593Smuzhiyun 	u64 *page_table = (u64 *)gd->arch.tlb_addr;
29*4882a593Smuzhiyun 	/* Need to set the access flag to not fault */
30*4882a593Smuzhiyun 	u64 value = TTB_SECT_AP | TTB_SECT_AF;
31*4882a593Smuzhiyun #else
32*4882a593Smuzhiyun 	u32 *page_table = (u32 *)gd->arch.tlb_addr;
33*4882a593Smuzhiyun 	u32 value = TTB_SECT_AP;
34*4882a593Smuzhiyun #endif
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun 	/* Add the page offset */
37*4882a593Smuzhiyun 	value |= ((u32)section << MMU_SECTION_SHIFT);
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun 	/* Add caching bits */
40*4882a593Smuzhiyun 	value |= option;
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun 	/* Set PTE */
43*4882a593Smuzhiyun 	page_table[section] = value;
44*4882a593Smuzhiyun }
45*4882a593Smuzhiyun 
/*
 * Weak default for flushing a range of page-table memory to the point of
 * coherency so the MMU walker sees the updated entries. Cache-capable
 * platforms override this; the default only emits a debug warning.
 */
__weak void mmu_page_table_flush(unsigned long start, unsigned long stop)
{
	debug("%s: Warning: not implemented\n", __func__);
}
50*4882a593Smuzhiyun 
/*
 * Set the cache attribute for every MMU section overlapping [start,
 * start + size), then flush the touched page-table entries so the walker
 * sees them.
 *
 * NOTE(review): 'start' is reused as a section index after the shift
 * below, so the debug() call prints the section number, not the original
 * physical address — confirm this is intentional before changing it.
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
#ifdef CONFIG_ARMV7_LPAE
	u64 *page_table = (u64 *)gd->arch.tlb_addr;
#else
	u32 *page_table = (u32 *)gd->arch.tlb_addr;
#endif
	unsigned long startpt, stoppt;
	unsigned long upto, end;

	/* Round the end up to a full section; convert both to indices */
	end = ALIGN(start + size, MMU_SECTION_SIZE) >> MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
#ifdef CONFIG_ARMV7_LPAE
	debug("%s: start=%pa, size=%zu, option=%llx\n", __func__, &start, size,
	      option);
#else
	debug("%s: start=%pa, size=%zu, option=0x%x\n", __func__, &start, size,
	      option);
#endif
	for (upto = start; upto < end; upto++)
		set_section_dcache(upto, option);

	/*
	 * Make sure range is cache line aligned
	 * Only CPU maintains page tables, hence it is safe to always
	 * flush complete cache lines...
	 */

	startpt = (unsigned long)&page_table[start];
	startpt &= ~(CONFIG_SYS_CACHELINE_SIZE - 1);
	stoppt = (unsigned long)&page_table[end];
	stoppt = ALIGN(stoppt, CONFIG_SYS_CACHELINE_SIZE);
	mmu_page_table_flush(startpt, stoppt);
}
86*4882a593Smuzhiyun 
dram_bank_mmu_setup(int bank)87*4882a593Smuzhiyun __weak void dram_bank_mmu_setup(int bank)
88*4882a593Smuzhiyun {
89*4882a593Smuzhiyun 	bd_t *bd = gd->bd;
90*4882a593Smuzhiyun 	int	i;
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun 	debug("%s: bank: %d\n", __func__, bank);
93*4882a593Smuzhiyun 	for (i = bd->bi_dram[bank].start >> MMU_SECTION_SHIFT;
94*4882a593Smuzhiyun 	     i < (bd->bi_dram[bank].start >> MMU_SECTION_SHIFT) +
95*4882a593Smuzhiyun 		 (bd->bi_dram[bank].size >> MMU_SECTION_SHIFT);
96*4882a593Smuzhiyun 	     i++) {
97*4882a593Smuzhiyun #if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
98*4882a593Smuzhiyun 		set_section_dcache(i, DCACHE_WRITETHROUGH);
99*4882a593Smuzhiyun #elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
100*4882a593Smuzhiyun 		set_section_dcache(i, DCACHE_WRITEALLOC);
101*4882a593Smuzhiyun #else
102*4882a593Smuzhiyun 		set_section_dcache(i, DCACHE_WRITEBACK);
103*4882a593Smuzhiyun #endif
104*4882a593Smuzhiyun 	}
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun /* to activate the MMU we need to set up virtual memory: use 1M areas */
/*
 * Build the translation tables, program the cp15 translation registers
 * (TTBR0/TTBCR or their Hyp-mode equivalents), set DACR, and finally set
 * SCTLR.M to turn the MMU on. Called from cache_enable() the first time
 * the data cache is enabled.
 */
static inline void mmu_setup(void)
{
	int i, end;
	u32 reg;

#ifndef CONFIG_SPL_BUILD
	/* bootrom and ddr didn't initial dcache,
	 * skip this to save boot time.
	 */
	arm_init_before_mmu();
#endif

	/*
	 * SPL thunder-boot:
	 * only map periph device region to save boot time.
	 */
#if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_KERNEL_BOOT) && \
    defined(CONFIG_PERIPH_DEVICE_START_ADDR)
	i = CONFIG_PERIPH_DEVICE_START_ADDR >> MMU_SECTION_SHIFT;
	end = CONFIG_PERIPH_DEVICE_END_ADDR >> MMU_SECTION_SHIFT;
#else
	i = 0;
	end = (4096ULL * 1024 * 1024) >> MMU_SECTION_SHIFT;
#endif
	/* Set up an identity-mapping for all 4GB, rw for everyone */
	for (; i < end; i++)
		set_section_dcache(i, DCACHE_OFF);

	/* Re-map each DRAM bank cacheable on top of the DCACHE_OFF default */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		dram_bank_mmu_setup(i);
	}

#if defined(CONFIG_ARMV7_LPAE) && __LINUX_ARM_ARCH__ != 4
	/* Set up 4 PTE entries pointing to our 4 1GB page tables */
	for (i = 0; i < 4; i++) {
		/* Level-1 table lives right after the four level-2 tables */
		u64 *page_table = (u64 *)(gd->arch.tlb_addr + (4096 * 4));
		u64 tpt = gd->arch.tlb_addr + (4096 * i);
		page_table[i] = tpt | TTB_PAGETABLE;
	}

	/* TTBCR: enable the extended (LPAE) format plus cache walk policy */
	reg = TTBCR_EAE;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBCR_ORGN0_WT | TTBCR_IRGN0_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA;
#else
	reg |= TTBCR_ORGN0_WBNWA | TTBCR_IRGN0_WBNWA;
#endif

	if (is_hyp()) {
		/* Set HTCR to enable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set HTTBR0 */
		asm volatile("mcrr p15, 4, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set HMAIR */
		asm volatile("mcr p15, 4, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	} else {
		/* Set TTBCR to enable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (reg) : "memory");
		/* Set 64-bit TTBR0 */
		asm volatile("mcrr p15, 0, %0, %1, c2"
			:
			: "r"(gd->arch.tlb_addr + (4096 * 4)), "r"(0)
			: "memory");
		/* Set MAIR */
		asm volatile("mcr p15, 0, %0, c10, c2, 0"
			: : "r" (MEMORY_ATTRIBUTES) : "memory");
	}
#elif defined(CONFIG_CPU_V7)
	if (is_hyp()) {
		/* Set HTCR to disable LPAE */
		asm volatile("mcr p15, 4, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	} else {
		/* Set TTBCR to disable LPAE */
		asm volatile("mcr p15, 0, %0, c2, c0, 2"
			: : "r" (0) : "memory");
	}
	/* Set TTBR0 */
	reg = gd->arch.tlb_addr & TTBR0_BASE_ADDR_MASK;
#if defined(CONFIG_SYS_ARM_CACHE_WRITETHROUGH)
	reg |= TTBR0_RGN_WT | TTBR0_IRGN_WT;
#elif defined(CONFIG_SYS_ARM_CACHE_WRITEALLOC)
	reg |= TTBR0_RGN_WBWA | TTBR0_IRGN_WBWA;
#else
	reg |= TTBR0_RGN_WB | TTBR0_IRGN_WB;
#endif
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (reg) : "memory");
#else
	/* Copy the page table address to cp15 */
	asm volatile("mcr p15, 0, %0, c2, c0, 0"
		     : : "r" (gd->arch.tlb_addr) : "memory");
#endif
	/* Set the access control to all-supervisor */
	asm volatile("mcr p15, 0, %0, c3, c0, 0"
		     : : "r" (~0));

	arm_init_domains();

	/* and enable the mmu */
	reg = get_cr();	/* get control reg. */
	set_cr(reg | CR_M);
}
218*4882a593Smuzhiyun 
mmu_enabled(void)219*4882a593Smuzhiyun static int mmu_enabled(void)
220*4882a593Smuzhiyun {
221*4882a593Smuzhiyun 	return get_cr() & CR_M;
222*4882a593Smuzhiyun }
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun /* cache_bit must be either CR_I or CR_C */
/* cache_bit must be either CR_I or CR_C */
static void cache_enable(uint32_t cache_bit)
{
	/* The data cache is not active unless the mmu is enabled too */
	if (cache_bit == CR_C && !mmu_enabled())
		mmu_setup();

	/* Turn the requested enable bit on in the control register */
	set_cr(get_cr() | cache_bit);
}
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun /* cache_bit must be either CR_I or CR_C */
/* cache_bit must be either CR_I or CR_C */
static void cache_disable(uint32_t cache_bit)
{
	uint32_t reg;

	reg = get_cr();

	if (cache_bit == CR_C) {
		/* if cache isn't enabled no need to disable */
		if ((reg & CR_C) != CR_C)
			return;
		/* if disabling data cache, disable mmu too */
		cache_bit |= CR_M;
	}
	/*
	 * No need to re-read the control register here: nothing between
	 * the read above and this point modifies it (the original code
	 * redundantly called get_cr() a second time).
	 */

	/* Flush dirty lines before the dcache + MMU go off */
	if (cache_bit == (CR_C | CR_M))
		flush_dcache_all();
	set_cr(reg & ~cache_bit);
}
256*4882a593Smuzhiyun #endif
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun #ifdef CONFIG_SYS_ICACHE_OFF
/* I-cache support compiled out (CONFIG_SYS_ICACHE_OFF): nothing to do */
void icache_enable(void)
{
}
263*4882a593Smuzhiyun 
/* I-cache support compiled out (CONFIG_SYS_ICACHE_OFF): nothing to do */
void icache_disable(void)
{
}
268*4882a593Smuzhiyun 
/* I-cache support compiled out: report it as permanently disabled */
int icache_status(void)
{
	return 0;
}
273*4882a593Smuzhiyun #else
/* Enable the instruction cache by setting CR_I via cache_enable() */
void icache_enable(void)
{
	cache_enable(CR_I);
}
278*4882a593Smuzhiyun 
/* Disable the instruction cache by clearing CR_I via cache_disable() */
void icache_disable(void)
{
	cache_disable(CR_I);
}
283*4882a593Smuzhiyun 
icache_status(void)284*4882a593Smuzhiyun int icache_status(void)
285*4882a593Smuzhiyun {
286*4882a593Smuzhiyun 	return (get_cr() & CR_I) != 0;
287*4882a593Smuzhiyun }
288*4882a593Smuzhiyun #endif
289*4882a593Smuzhiyun 
290*4882a593Smuzhiyun #ifdef CONFIG_SYS_DCACHE_OFF
/* D-cache support compiled out (CONFIG_SYS_DCACHE_OFF): nothing to do */
void dcache_enable(void)
{
}
295*4882a593Smuzhiyun 
/* D-cache support compiled out (CONFIG_SYS_DCACHE_OFF): nothing to do */
void dcache_disable(void)
{
}
300*4882a593Smuzhiyun 
/* D-cache support compiled out: report it as permanently disabled */
int dcache_status(void)
{
	return 0;
}
305*4882a593Smuzhiyun #else
/* Enable the data cache (sets up the MMU first if needed) via CR_C */
void dcache_enable(void)
{
	cache_enable(CR_C);
}
310*4882a593Smuzhiyun 
/* Disable the data cache (and MMU, with a flush) by clearing CR_C */
void dcache_disable(void)
{
	cache_disable(CR_C);
}
315*4882a593Smuzhiyun 
dcache_status(void)316*4882a593Smuzhiyun int dcache_status(void)
317*4882a593Smuzhiyun {
318*4882a593Smuzhiyun 	return (get_cr() & CR_C) != 0;
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun #endif
321