/*
 * (C) Copyright 2008 Texas Instruments
 *
 * (C) Copyright 2002
 * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
 * Marius Groeger <mgroeger@sysgo.de>
 *
 * (C) Copyright 2002
 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

/*
 * CPU specific code
 */

#include <common.h>
#include <command.h>
#include <asm/system.h>
#include <asm/cache.h>
#include <asm/armv7.h>
#include <linux/compiler.h>

void __weak cpu_cache_initialization(void) {}

int cleanup_before_linux_select(int flags)
{
	/*
	 * This function is called just before we call Linux.
	 * It prepares the processor for Linux.
	 *
	 * We turn off caches etc ...
	 */
#ifndef CONFIG_SPL_BUILD
	disable_interrupts();
#endif
	/* Mask asynchronous aborts before handing control to the kernel */
	disable_async_abort();

	if (flags & CBL_DISABLE_CACHES) {
		/*
		 * Turn off the D-cache.
		 * dcache_disable() in turn flushes the d-cache and disables
		 * the MMU.
		 */
		dcache_disable();
		v7_outer_cache_disable();

		/*
		 * After the D-cache is flushed and before it is disabled,
		 * some new valid entries may be brought into the cache. We
		 * are sure that these lines are not dirty and will not affect
		 * our execution, because unwinding the call-stack and setting
		 * a bit in CP15 SCTLR is all we did during this; we have not
		 * pushed anything onto the stack, nor have we touched any
		 * static data. So just invalidate the entire d-cache again to
		 * avoid coherency problems for the kernel.
		 */
		invalidate_dcache_all();

		icache_disable();
		invalidate_icache_all();
	} else {
		/*
		 * Turn off the I-cache and invalidate it.
		 */
		icache_disable();
		invalidate_icache_all();

		flush_dcache_all();
		invalidate_icache_all();
		icache_enable();
	}

	/*
	 * Some CPUs need more cache attention before starting the kernel.
	 */
	cpu_cache_initialization();

	return 0;
}

int cleanup_before_linux(void)
{
	return cleanup_before_linux_select(CBL_ALL);
}