xref: /OK3568_Linux_fs/u-boot/arch/powerpc/include/asm/cache.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * include/asm-ppc/cache.h
3*4882a593Smuzhiyun  */
4*4882a593Smuzhiyun #ifndef __ARCH_PPC_CACHE_H
5*4882a593Smuzhiyun #define __ARCH_PPC_CACHE_H
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #include <asm/processor.h>
8*4882a593Smuzhiyun 
/* bytes per L1 cache line: selected per CPU family at configure time */
#if defined(CONFIG_MPC8xx)
#define	L1_CACHE_SHIFT	4	/* MPC8xx: 16-byte cache lines */
#elif defined(CONFIG_PPC64BRIDGE)
#define L1_CACHE_SHIFT	7	/* PPC64 bridge mode: 128-byte cache lines */
#elif defined(CONFIG_E500MC)
#define L1_CACHE_SHIFT	6	/* e500mc: 64-byte cache lines */
#else
#define	L1_CACHE_SHIFT	5	/* default for classic PPC: 32-byte cache lines */
#endif

#define L1_CACHE_BYTES          (1 << L1_CACHE_SHIFT)
21*4882a593Smuzhiyun 
/*
 * Use the L1 data cache line size value for the minimum DMA buffer alignment
 * on PowerPC: DMA buffers must not share a cache line with unrelated data.
 */
#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES

/*
 * For compatibility reasons support the legacy CONFIG_SYS_CACHELINE_SIZE
 * name too; boards may still override it before including this header.
 */
#ifndef CONFIG_SYS_CACHELINE_SIZE
#define CONFIG_SYS_CACHELINE_SIZE	L1_CACHE_BYTES
#endif
34*4882a593Smuzhiyun 
/* Round x up to the next L1 cache line boundary. */
#define	L1_CACHE_ALIGN(x)       (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
/* NOTE(review): legacy Linux carry-over; exact meaning not evident here. */
#define	L1_CACHE_PAGES		8

/* Alignment required for data shared between CPUs equals the L1 line size. */
#define	SMP_CACHE_BYTES L1_CACHE_BYTES
39*4882a593Smuzhiyun 
/* Align an object to an L1 cache line.  Outside of modules the object is
 * additionally placed in the dedicated .data.cacheline_aligned section so
 * the linker can pack such objects together.
 */
#ifdef MODULE
#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
#else
#define __cacheline_aligned					\
  __attribute__((__aligned__(L1_CACHE_BYTES),			\
		 __section__(".data.cacheline_aligned")))
#endif
47*4882a593Smuzhiyun 
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
/*
 * Cache maintenance primitives; implementations are provided elsewhere in
 * the arch code.  Address ranges are [start, stop) in bytes.
 */
extern void flush_dcache_range(unsigned long start, unsigned long stop);
extern void clean_dcache_range(unsigned long start, unsigned long stop);
extern void invalidate_dcache_range(unsigned long start, unsigned long stop);
extern void flush_dcache(void);
extern void invalidate_dcache(void);
extern void invalidate_icache(void);
#ifdef CONFIG_SYS_INIT_RAM_LOCK
extern void unlock_ram_in_cache(void);
#endif /* CONFIG_SYS_INIT_RAM_LOCK */

/* L2 cache / CPC management. */
int l2cache_init(void);
void enable_cpc(void);
void disable_cpc_sram(void);
#endif /* __KERNEL__ && !__ASSEMBLY__ */
65*4882a593Smuzhiyun 
/* PReP registers for L2 cache configuration. */
#define CACHECRBA       0x80000823      /* Cache configuration register address */
#define L2CACHE_MASK	0x03	/* Mask for the 2 L2 cache size bits */
#define L2CACHE_512KB	0x00	/* 512KB */
#define L2CACHE_256KB	0x01	/* 256KB */
#define L2CACHE_1MB	0x02	/* 1MB */
#define L2CACHE_NONE	0x03	/* no L2 cache present */
#define L2CACHE_PARITY  0x08    /* Mask for L2 Cache Parity Protected bit */
74*4882a593Smuzhiyun 
#ifdef CONFIG_MPC8xx
/* Cache control on the MPC8xx is provided through some additional
 * special purpose registers.  The values below are SPR numbers.
 */
#define IC_CST		560	/* Instruction cache control/status */
#define IC_ADR		561	/* Address needed for some commands */
#define IC_DAT		562	/* Read-only data register */
#define DC_CST		568	/* Data cache control/status */
#define DC_ADR		569	/* Address needed for some commands */
#define DC_DAT		570	/* Read-only data register */

/* Commands written to IC_CST/DC_CST.  Only the first few are available to
 * the instruction cache.
 */
#define	IDC_ENABLE	0x02000000	/* Cache enable */
#define IDC_DISABLE	0x04000000	/* Cache disable */
#define IDC_LDLCK	0x06000000	/* Load and lock */
#define IDC_UNLINE	0x08000000	/* Unlock line */
#define IDC_UNALL	0x0a000000	/* Unlock all */
#define IDC_INVALL	0x0c000000	/* Invalidate all */

/* Data-cache-only commands. */
#define DC_FLINE	0x0e000000	/* Flush data cache line */
#define DC_SFWT		0x01000000	/* Set forced writethrough mode */
#define DC_CFWT		0x03000000	/* Clear forced writethrough mode */
#define DC_SLES		0x05000000	/* Set little endian swap mode */
#define DC_CLES		0x07000000	/* Clear little endian swap mode */

/* Status bits read back from IC_CST/DC_CST.
*/
#define IDC_ENABLED	0x80000000	/* Cache is enabled */
#define IDC_CERR1	0x00200000	/* Cache error 1 */
#define IDC_CERR2	0x00100000	/* Cache error 2 */
#define IDC_CERR3	0x00080000	/* Cache error 3 */

#define DC_DFWT		0x40000000	/* Data cache is forced write through */
#define DC_LES		0x20000000	/* Caches are little endian mode */
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun #if !defined(__ASSEMBLY__)
rd_ic_cst(void)112*4882a593Smuzhiyun static inline uint rd_ic_cst(void)
113*4882a593Smuzhiyun {
114*4882a593Smuzhiyun 	return mfspr(IC_CST);
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun 
/* Write a command/control word to the instruction cache IC_CST SPR. */
static inline void wr_ic_cst(uint cmd)
{
	mtspr(IC_CST, cmd);
}
121*4882a593Smuzhiyun 
/* Set the address operand (IC_ADR SPR) for a following i-cache command. */
static inline void wr_ic_adr(uint addr)
{
	mtspr(IC_ADR, addr);
}
126*4882a593Smuzhiyun 
rd_dc_cst(void)127*4882a593Smuzhiyun static inline uint rd_dc_cst(void)
128*4882a593Smuzhiyun {
129*4882a593Smuzhiyun 	return mfspr(DC_CST);
130*4882a593Smuzhiyun }
131*4882a593Smuzhiyun 
/* Write a command/control word to the data cache DC_CST SPR. */
static inline void wr_dc_cst(uint cmd)
{
	mtspr(DC_CST, cmd);
}
136*4882a593Smuzhiyun 
/* Set the address operand (DC_ADR SPR) for a following d-cache command. */
static inline void wr_dc_adr(uint addr)
{
	mtspr(DC_ADR, addr);
}
141*4882a593Smuzhiyun #endif
142*4882a593Smuzhiyun #endif /* CONFIG_MPC8xx */
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun #endif
145