/*
 * Cache definitions for the SH architecture: L1 line size and the
 * minimum DMA buffer alignment derived from it.
 */
#ifndef __ASM_SH_CACHE_H
#define __ASM_SH_CACHE_H

#if defined(CONFIG_CPU_SH4)

/* SH4 L1 data cache line size, in bytes. */
#define L1_CACHE_BYTES	32

/*
 * NOTE(review): __m() casts an address to a dummy 400-byte struct lvalue;
 * this idiom is typically used to hand inline asm a "memory" operand that
 * covers a region larger than one scalar — confirm against its users.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))

#else

/*
 * 32 bytes is the largest L1 data cache line size for the SH architecture,
 * so it is a safe default for DMA alignment.
 */
#define ARCH_DMA_MINALIGN	32

#endif /* CONFIG_CPU_SH4 */

/*
 * Use the L1 data cache line size value for the minimum DMA buffer alignment
 * on SH.
 */
#ifndef ARCH_DMA_MINALIGN
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
#endif

#endif /* __ASM_SH_CACHE_H */