xref: /rk3399_rockchip-uboot/arch/arm/cpu/armv8/cache_v8.c (revision 0691484ac1efb1981dfd1b38df9646128bafff32)
10ae76531SDavid Feng /*
20ae76531SDavid Feng  * (C) Copyright 2013
30ae76531SDavid Feng  * David Feng <fenghua@phytium.com.cn>
40ae76531SDavid Feng  *
50ae76531SDavid Feng  * SPDX-License-Identifier:	GPL-2.0+
60ae76531SDavid Feng  */
70ae76531SDavid Feng 
80ae76531SDavid Feng #include <common.h>
90ae76531SDavid Feng #include <asm/system.h>
100ae76531SDavid Feng #include <asm/armv8/mmu.h>
110ae76531SDavid Feng 
120ae76531SDavid Feng DECLARE_GLOBAL_DATA_PTR;
130ae76531SDavid Feng 
140ae76531SDavid Feng #ifndef CONFIG_SYS_DCACHE_OFF
1594f7ff36SSergey Temerkhanov 
1694f7ff36SSergey Temerkhanov #ifdef CONFIG_SYS_FULL_VA
1794f7ff36SSergey Temerkhanov static void set_ptl1_entry(u64 index, u64 ptl2_entry)
1894f7ff36SSergey Temerkhanov {
1994f7ff36SSergey Temerkhanov 	u64 *pgd = (u64 *)gd->arch.tlb_addr;
2094f7ff36SSergey Temerkhanov 	u64 value;
2194f7ff36SSergey Temerkhanov 
2294f7ff36SSergey Temerkhanov 	value = ptl2_entry | PTL1_TYPE_TABLE;
2394f7ff36SSergey Temerkhanov 	pgd[index] = value;
2494f7ff36SSergey Temerkhanov }
2594f7ff36SSergey Temerkhanov 
2694f7ff36SSergey Temerkhanov static void set_ptl2_block(u64 ptl1, u64 bfn, u64 address, u64 memory_attrs)
2794f7ff36SSergey Temerkhanov {
2894f7ff36SSergey Temerkhanov 	u64 *pmd = (u64 *)ptl1;
2994f7ff36SSergey Temerkhanov 	u64 value;
3094f7ff36SSergey Temerkhanov 
3194f7ff36SSergey Temerkhanov 	value = address | PTL2_TYPE_BLOCK | PTL2_BLOCK_AF;
3294f7ff36SSergey Temerkhanov 	value |= memory_attrs;
3394f7ff36SSergey Temerkhanov 	pmd[bfn] = value;
3494f7ff36SSergey Temerkhanov }
3594f7ff36SSergey Temerkhanov 
3694f7ff36SSergey Temerkhanov static struct mm_region mem_map[] = CONFIG_SYS_MEM_MAP;
3794f7ff36SSergey Temerkhanov 
3894f7ff36SSergey Temerkhanov #define PTL1_ENTRIES CONFIG_SYS_PTL1_ENTRIES
3994f7ff36SSergey Temerkhanov #define PTL2_ENTRIES CONFIG_SYS_PTL2_ENTRIES
4094f7ff36SSergey Temerkhanov 
41*0691484aSAlexander Graf static u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
42*0691484aSAlexander Graf {
43*0691484aSAlexander Graf 	u64 max_addr = 0;
44*0691484aSAlexander Graf 	u64 ips, va_bits;
45*0691484aSAlexander Graf 	u64 tcr;
46*0691484aSAlexander Graf 	int i;
47*0691484aSAlexander Graf 
48*0691484aSAlexander Graf 	/* Find the largest address we need to support */
49*0691484aSAlexander Graf 	for (i = 0; i < ARRAY_SIZE(mem_map); i++)
50*0691484aSAlexander Graf 		max_addr = max(max_addr, mem_map[i].base + mem_map[i].size);
51*0691484aSAlexander Graf 
52*0691484aSAlexander Graf 	/* Calculate the maximum physical (and thus virtual) address */
53*0691484aSAlexander Graf 	if (max_addr > (1ULL << 44)) {
54*0691484aSAlexander Graf 		ips = 5;
55*0691484aSAlexander Graf 		va_bits = 48;
56*0691484aSAlexander Graf 	} else  if (max_addr > (1ULL << 42)) {
57*0691484aSAlexander Graf 		ips = 4;
58*0691484aSAlexander Graf 		va_bits = 44;
59*0691484aSAlexander Graf 	} else  if (max_addr > (1ULL << 40)) {
60*0691484aSAlexander Graf 		ips = 3;
61*0691484aSAlexander Graf 		va_bits = 42;
62*0691484aSAlexander Graf 	} else  if (max_addr > (1ULL << 36)) {
63*0691484aSAlexander Graf 		ips = 2;
64*0691484aSAlexander Graf 		va_bits = 40;
65*0691484aSAlexander Graf 	} else  if (max_addr > (1ULL << 32)) {
66*0691484aSAlexander Graf 		ips = 1;
67*0691484aSAlexander Graf 		va_bits = 36;
68*0691484aSAlexander Graf 	} else {
69*0691484aSAlexander Graf 		ips = 0;
70*0691484aSAlexander Graf 		va_bits = 32;
71*0691484aSAlexander Graf 	}
72*0691484aSAlexander Graf 
73*0691484aSAlexander Graf 	if (el == 1) {
74*0691484aSAlexander Graf 		tcr = TCR_EL1_RSVD | (ips << 32);
75*0691484aSAlexander Graf 	} else if (el == 2) {
76*0691484aSAlexander Graf 		tcr = TCR_EL2_RSVD | (ips << 16);
77*0691484aSAlexander Graf 	} else {
78*0691484aSAlexander Graf 		tcr = TCR_EL3_RSVD | (ips << 16);
79*0691484aSAlexander Graf 	}
80*0691484aSAlexander Graf 
81*0691484aSAlexander Graf 	/* PTWs cacheable, inner/outer WBWA and inner shareable */
82*0691484aSAlexander Graf 	tcr |= TCR_TG0_64K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
83*0691484aSAlexander Graf 	tcr |= TCR_T0SZ(VA_BITS);
84*0691484aSAlexander Graf 
85*0691484aSAlexander Graf 	if (pips)
86*0691484aSAlexander Graf 		*pips = ips;
87*0691484aSAlexander Graf 	if (pva_bits)
88*0691484aSAlexander Graf 		*pva_bits = va_bits;
89*0691484aSAlexander Graf 
90*0691484aSAlexander Graf 	return tcr;
91*0691484aSAlexander Graf }
92*0691484aSAlexander Graf 
/*
 * Build the two-level page tables described by mem_map.
 *
 * Level-1 entries covering a region listed in mem_map get a table
 * descriptor pointing at a per-region PTL2 table filled with block
 * mappings; all other level-1 entries are cleared.
 *
 * NOTE(review): mem_map entries are assumed to be sorted by base and to
 * start exactly on a PTL2-sized boundary, and `pmd` is assumed never to
 * run past CONFIG_SYS_MEM_MAP_SIZE entries — confirm against the board's
 * CONFIG_SYS_MEM_MAP definition.
 */
static void setup_pgtables(void)
{
	int l1_e, l2_e;
	unsigned long pmd = 0;
	unsigned long address;

	/* Setup the PMD pointers */
	for (l1_e = 0; l1_e < CONFIG_SYS_MEM_MAP_SIZE; l1_e++) {
		/* PTL2 tables live after the PTL1 table, 64KiB-aligned */
		gd->arch.pmd_addr[l1_e] = gd->arch.tlb_addr +
						PTL1_ENTRIES * sizeof(u64);
		gd->arch.pmd_addr[l1_e] += PTL2_ENTRIES * sizeof(u64) * l1_e;
		gd->arch.pmd_addr[l1_e] = ALIGN(gd->arch.pmd_addr[l1_e],
						0x10000UL);
	}

	/* Setup the page tables */
	for (l1_e = 0; l1_e < PTL1_ENTRIES; l1_e++) {
		/* Does the next unmapped region start at this L1 slot? */
		if (mem_map[pmd].base ==
			(uintptr_t)l1_e << PTL2_BITS) {
			set_ptl1_entry(l1_e, gd->arch.pmd_addr[pmd]);

			/* Fill the region's PTL2 table with block entries */
			for (l2_e = 0; l2_e < PTL2_ENTRIES; l2_e++) {
				address = mem_map[pmd].base
					+ (uintptr_t)l2_e * BLOCK_SIZE;
				set_ptl2_block(gd->arch.pmd_addr[pmd], l2_e,
					       address, mem_map[pmd].attrs);
			}

			pmd++;
		} else {
			/* No mapping for this L1 slot: invalid entry */
			set_ptl1_entry(l1_e, 0);
		}
	}
}
12794f7ff36SSergey Temerkhanov 
12894f7ff36SSergey Temerkhanov #else
12994f7ff36SSergey Temerkhanov 
13099799220SAlison Wang inline void set_pgtable_section(u64 *page_table, u64 index, u64 section,
131d764129dSAlison Wang 			 u64 memory_type, u64 attribute)
1320ae76531SDavid Feng {
1330ae76531SDavid Feng 	u64 value;
1340ae76531SDavid Feng 
13522932ffcSYork Sun 	value = section | PMD_TYPE_SECT | PMD_SECT_AF;
1360ae76531SDavid Feng 	value |= PMD_ATTRINDX(memory_type);
137d764129dSAlison Wang 	value |= attribute;
13899799220SAlison Wang 	page_table[index] = value;
13999799220SAlison Wang }
14099799220SAlison Wang 
14199799220SAlison Wang inline void set_pgtable_table(u64 *page_table, u64 index, u64 *table_addr)
14299799220SAlison Wang {
14399799220SAlison Wang 	u64 value;
14499799220SAlison Wang 
14599799220SAlison Wang 	value = (u64)table_addr | PMD_TYPE_TABLE;
14622932ffcSYork Sun 	page_table[index] = value;
1470ae76531SDavid Feng }
14894f7ff36SSergey Temerkhanov #endif
1490ae76531SDavid Feng 
/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
#ifndef CONFIG_SYS_FULL_VA
	bd_t *bd = gd->bd;
	u64 *page_table = (u64 *)gd->arch.tlb_addr, i, j;
#endif
	int el;

#ifdef CONFIG_SYS_FULL_VA
	unsigned long coreid = read_mpidr() & CONFIG_COREID_MASK;

	/* Set up page tables only on BSP */
	if (coreid == BSP_COREID)
		setup_pgtables();

	/* Program TTBR0/TCR/MAIR for the current exception level */
	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);
#else
	/* Setup an identity-mapping for all spaces */
	/* Default every section to strongly-ordered device memory */
	for (i = 0; i < (PGTABLE_SIZE >> 3); i++) {
		set_pgtable_section(page_table, i, i << SECTION_SHIFT,
				    MT_DEVICE_NGNRNE, PMD_SECT_NON_SHARE);
	}

	/* Setup an identity-mapping for all RAM space */
	/* Re-mark each DRAM bank from bd_info as normal cacheable memory */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		ulong start = bd->bi_dram[i].start;
		ulong end = bd->bi_dram[i].start + bd->bi_dram[i].size;
		for (j = start >> SECTION_SHIFT;
		     j < end >> SECTION_SHIFT; j++) {
			set_pgtable_section(page_table, j, j << SECTION_SHIFT,
					    MT_NORMAL, PMD_SECT_NON_SHARE);
		}
	}

	/* load TTBR0 */
	el = current_el();
	if (el == 1) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL1_RSVD | TCR_FLAGS | TCR_EL1_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else if (el == 2) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL2_RSVD | TCR_FLAGS | TCR_EL2_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL3_RSVD | TCR_FLAGS | TCR_EL3_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	}
#endif

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}
2070ae76531SDavid Feng 
/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
}
2150ae76531SDavid Feng 
/*
 * Clean & invalidate the entire data cache at all levels, then flush L3.
 * Kept inline so the flush sequence does not touch the stack.
 * __asm_flush_l3_cache() returns a timeout status which is only logged.
 */
inline void flush_dcache_all(void)
{
	int status;

	__asm_flush_dcache_all();
	status = __asm_flush_l3_cache();
	if (!status)
		debug("flushing dcache successfully.\n");
	else
		debug("flushing dcache returns 0x%x\n", status);
}
2320ae76531SDavid Feng 
/*
 * Invalidates range in all levels of D-cache/unified cache
 *
 * NOTE(review): this uses the flush (clean & invalidate) primitive, not a
 * pure invalidate, so dirty lines in the range are written back first —
 * confirm this is the intended semantics for callers expecting a discard.
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
2400ae76531SDavid Feng 
/*
 * Flush range(clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
2480ae76531SDavid Feng 
2490ae76531SDavid Feng void dcache_enable(void)
2500ae76531SDavid Feng {
2510ae76531SDavid Feng 	/* The data cache is not active unless the mmu is enabled */
2520ae76531SDavid Feng 	if (!(get_sctlr() & CR_M)) {
2530ae76531SDavid Feng 		invalidate_dcache_all();
2540ae76531SDavid Feng 		__asm_invalidate_tlb_all();
2550ae76531SDavid Feng 		mmu_setup();
2560ae76531SDavid Feng 	}
2570ae76531SDavid Feng 
2580ae76531SDavid Feng 	set_sctlr(get_sctlr() | CR_C);
2590ae76531SDavid Feng }
2600ae76531SDavid Feng 
2610ae76531SDavid Feng void dcache_disable(void)
2620ae76531SDavid Feng {
2630ae76531SDavid Feng 	uint32_t sctlr;
2640ae76531SDavid Feng 
2650ae76531SDavid Feng 	sctlr = get_sctlr();
2660ae76531SDavid Feng 
2670ae76531SDavid Feng 	/* if cache isn't enabled no need to disable */
2680ae76531SDavid Feng 	if (!(sctlr & CR_C))
2690ae76531SDavid Feng 		return;
2700ae76531SDavid Feng 
2710ae76531SDavid Feng 	set_sctlr(sctlr & ~(CR_C|CR_M));
2720ae76531SDavid Feng 
2730ae76531SDavid Feng 	flush_dcache_all();
2740ae76531SDavid Feng 	__asm_invalidate_tlb_all();
2750ae76531SDavid Feng }
2760ae76531SDavid Feng 
2770ae76531SDavid Feng int dcache_status(void)
2780ae76531SDavid Feng {
2790ae76531SDavid Feng 	return (get_sctlr() & CR_C) != 0;
2800ae76531SDavid Feng }
2810ae76531SDavid Feng 
282dad17fd5SSiva Durga Prasad Paladugu u64 *__weak arch_get_page_table(void) {
283dad17fd5SSiva Durga Prasad Paladugu 	puts("No page table offset defined\n");
284dad17fd5SSiva Durga Prasad Paladugu 
285dad17fd5SSiva Durga Prasad Paladugu 	return NULL;
286dad17fd5SSiva Durga Prasad Paladugu }
287dad17fd5SSiva Durga Prasad Paladugu 
28894f7ff36SSergey Temerkhanov #ifndef CONFIG_SYS_FULL_VA
/*
 * Change the cache attribute (MAIR index) of every section descriptor
 * covering [start, start + size), then invalidate the TLB and flush the
 * affected range so the new attributes take effect.
 *
 * NOTE(review): `start` is assumed to be section-aligned — the right-shift
 * below discards any sub-section offset. Confirm callers honour this.
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 *page_table = arch_get_page_table();
	u64 upto, end;

	/* No board-provided page table: silently do nothing */
	if (page_table == NULL)
		return;

	/* Convert the byte range to an inclusive range of section indices */
	end = ALIGN(start + size, (1 << MMU_SECTION_SHIFT)) >>
	      MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
	for (upto = start; upto < end; upto++) {
		/* Replace only the attribute-index bits of each descriptor */
		page_table[upto] &= ~PMD_ATTRINDX_MASK;
		page_table[upto] |= PMD_ATTRINDX(option);
	}
	/* Make the descriptor updates visible, then drop stale TLB entries */
	asm volatile("dsb sy");
	__asm_invalidate_tlb_all();
	asm volatile("dsb sy");
	asm volatile("isb");
	/* Flush the remapped range itself under its old attributes */
	start = start << MMU_SECTION_SHIFT;
	end = end << MMU_SECTION_SHIFT;
	flush_dcache_range(start, end);
	asm volatile("dsb sy");
}
31494f7ff36SSergey Temerkhanov #endif
31594f7ff36SSergey Temerkhanov 
3160ae76531SDavid Feng #else	/* CONFIG_SYS_DCACHE_OFF */
3170ae76531SDavid Feng 
/* No-op: data cache support compiled out (CONFIG_SYS_DCACHE_OFF) */
void invalidate_dcache_all(void)
{
}
3210ae76531SDavid Feng 
/* No-op: data cache support compiled out (CONFIG_SYS_DCACHE_OFF) */
void flush_dcache_all(void)
{
}
3250ae76531SDavid Feng 
/* No-op: data cache support compiled out (CONFIG_SYS_DCACHE_OFF) */
void dcache_enable(void)
{
}
3290ae76531SDavid Feng 
/* No-op: data cache support compiled out (CONFIG_SYS_DCACHE_OFF) */
void dcache_disable(void)
{
}
3330ae76531SDavid Feng 
/* Always reports "disabled" when data cache support is compiled out */
int dcache_status(void)
{
	return 0;
}
3380ae76531SDavid Feng 
/* No-op: data cache support compiled out (CONFIG_SYS_DCACHE_OFF) */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}
343dad17fd5SSiva Durga Prasad Paladugu 
3440ae76531SDavid Feng #endif	/* CONFIG_SYS_DCACHE_OFF */
3450ae76531SDavid Feng 
3460ae76531SDavid Feng #ifndef CONFIG_SYS_ICACHE_OFF
3470ae76531SDavid Feng 
/* Enable the instruction cache after discarding any stale contents. */
void icache_enable(void)
{
	/* Invalidate first so no stale lines are hit once CR_I is set */
	__asm_invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}
3530ae76531SDavid Feng 
/* Disable the instruction cache by clearing SCTLR.I. */
void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}
3580ae76531SDavid Feng 
3590ae76531SDavid Feng int icache_status(void)
3600ae76531SDavid Feng {
3610ae76531SDavid Feng 	return (get_sctlr() & CR_I) != 0;
3620ae76531SDavid Feng }
3630ae76531SDavid Feng 
/* Invalidate the entire instruction cache. */
void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
}
3680ae76531SDavid Feng 
3690ae76531SDavid Feng #else	/* CONFIG_SYS_ICACHE_OFF */
3700ae76531SDavid Feng 
/* No-op: instruction cache support compiled out (CONFIG_SYS_ICACHE_OFF) */
void icache_enable(void)
{
}
3740ae76531SDavid Feng 
/* No-op: instruction cache support compiled out (CONFIG_SYS_ICACHE_OFF) */
void icache_disable(void)
{
}
3780ae76531SDavid Feng 
/* Always reports "disabled" when instruction cache support is compiled out */
int icache_status(void)
{
	return 0;
}
3830ae76531SDavid Feng 
/* No-op: instruction cache support compiled out (CONFIG_SYS_ICACHE_OFF) */
void invalidate_icache_all(void)
{
}
3870ae76531SDavid Feng 
3880ae76531SDavid Feng #endif	/* CONFIG_SYS_ICACHE_OFF */
3890ae76531SDavid Feng 
/*
 * Enable dCache & iCache; whether a cache is actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	/* I-cache first; dcache_enable() also sets up and enables the MMU */
	icache_enable();
	dcache_enable();
}
399