xref: /OK3568_Linux_fs/kernel/arch/arm/include/asm/cache.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  arch/arm/include/asm/cache.h
 */
#ifndef __ASMARM_CACHE_H
#define __ASMARM_CACHE_H

#define L1_CACHE_SHIFT		CONFIG_ARM_L1_CACHE_SHIFT
#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)

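/*
 * Illustrative note (not part of the original header): Kconfig selects the
 * shift per CPU family; ARMv7 parts, for instance, typically end up with
 * CONFIG_ARM_L1_CACHE_SHIFT=6, so L1_CACHE_BYTES = 1 << 6 = 64.
 */
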
/*
 * Memory returned by kmalloc() may be used for DMA, so we must make
 * sure that all such allocations are cache aligned. Otherwise,
 * unrelated code may cause parts of the buffer to be read into the
 * cache before the transfer is done, causing old data to be seen by
 * the CPU.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

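/*
 * Usage sketch (illustrative, not part of the original header; struct and
 * field names are hypothetical): kmalloc() already honours this alignment,
 * so plain kmalloc'ed buffers are safe for DMA. A buffer embedded in a
 * larger structure must be annotated by hand:
 *
 *	struct foo_dev {
 *		spinlock_t lock;
 *		u8 rx_buf[64] __aligned(ARCH_DMA_MINALIGN);
 *	};
 *
 * The annotation starts rx_buf on a fresh cache line, so CPU stores to
 * lock cannot drag the buffer's first line into the cache mid-transfer;
 * sizing the buffer as a multiple of L1_CACHE_BYTES protects its tail too.
 */
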
/*
 * With EABI on ARMv5 and above we must have 64-bit aligned slab pointers.
 */
#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
#define ARCH_SLAB_MINALIGN 8
#endif

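/*
 * Sketch of why this matters (illustrative; the type name is hypothetical):
 * the EABI gives 64-bit types natural 8-byte alignment, so for
 *
 *	struct foo_stats {
 *		u64 packets;
 *	};
 *	struct foo_stats *s = kmalloc(sizeof(*s), GFP_KERNEL);
 *
 * the compiler may access s->packets with ldrd/strd, which on ARMv5TE
 * demand a doubleword-aligned address; ARCH_SLAB_MINALIGN makes the slab
 * allocator deliver that alignment.
 */
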
#define __read_mostly __section(".data..read_mostly")

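/*
 * Usage sketch (illustrative; the variable name is hypothetical): data
 * that is read on hot paths but rarely written can be grouped into
 * .data..read_mostly, away from frequently written variables, to reduce
 * cache-line ping-pong:
 *
 *	static int foo_debug_level __read_mostly;
 */
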
#endif