xref: /OK3568_Linux_fs/kernel/arch/powerpc/include/asm/mmu.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _ASM_POWERPC_MMU_H_
3*4882a593Smuzhiyun #define _ASM_POWERPC_MMU_H_
4*4882a593Smuzhiyun #ifdef __KERNEL__
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/types.h>
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <asm/asm-const.h>
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun /*
11*4882a593Smuzhiyun  * MMU features bit definitions
12*4882a593Smuzhiyun  */
13*4882a593Smuzhiyun 
/*
 * MMU families. Exactly one of these base-type bits identifies which
 * kind of MMU the CPU has; the remaining MMU_FTR_* bits below describe
 * individual capabilities layered on top.
 */
#define MMU_FTR_HPTE_TABLE		ASM_CONST(0x00000001)
#define MMU_FTR_TYPE_8xx		ASM_CONST(0x00000002)
#define MMU_FTR_TYPE_40x		ASM_CONST(0x00000004)
#define MMU_FTR_TYPE_44x		ASM_CONST(0x00000008)
#define MMU_FTR_TYPE_FSL_E		ASM_CONST(0x00000010)
#define MMU_FTR_TYPE_47x		ASM_CONST(0x00000020)

/* Radix page table supported and enabled */
#define MMU_FTR_TYPE_RADIX		ASM_CONST(0x00000040)
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun /*
28*4882a593Smuzhiyun  * Individual features below.
29*4882a593Smuzhiyun  */
30*4882a593Smuzhiyun 
/*
 * Individual features below. Each is a distinct bit so that
 * mmu_has_feature() can map a feature to a static key by bit index.
 */

/*
 * Support for KUEP (kernel userspace execution prevention) feature.
 */
#define MMU_FTR_KUEP			ASM_CONST(0x00000400)

/*
 * Support for memory protection keys.
 */
#define MMU_FTR_PKEY			ASM_CONST(0x00000800)

/* Guest Translation Shootdown Enable */
#define MMU_FTR_GTSE			ASM_CONST(0x00001000)

/*
 * Support for 68 bit VA space. We added that from ISA 2.05
 */
#define MMU_FTR_68_BIT_VA		ASM_CONST(0x00002000)
/*
 * Kernel read only support.
 * We added the ppp value 0b110 in ISA 2.04.
 */
#define MMU_FTR_KERNEL_RO		ASM_CONST(0x00004000)

/*
 * We need to clear the top 16 bits of va (from the remaining 64 bits) in
 * tlbie* instructions
 */
#define MMU_FTR_TLBIE_CROP_VA		ASM_CONST(0x00008000)

/* Enable use of high BAT registers */
#define MMU_FTR_USE_HIGH_BATS		ASM_CONST(0x00010000)

/* Enable >32-bit physical addresses on 32-bit processor, only used
 * by CONFIG_PPC_BOOK3S_32 currently as BookE supports that from day 1
 */
#define MMU_FTR_BIG_PHYS		ASM_CONST(0x00020000)

/* Enable use of broadcast TLB invalidations. We don't always set it
 * on processors that support it due to other constraints with the
 * use of such invalidations
 */
#define MMU_FTR_USE_TLBIVAX_BCAST	ASM_CONST(0x00040000)

/* Enable use of tlbilx invalidate instructions.
 */
#define MMU_FTR_USE_TLBILX		ASM_CONST(0x00080000)

/* This indicates that the processor cannot handle multiple outstanding
 * broadcast tlbivax or tlbsync. This makes the code use a spinlock
 * around such invalidate forms.
 */
#define MMU_FTR_LOCK_BCAST_INVAL	ASM_CONST(0x00100000)

/* This indicates that the processor doesn't handle way selection
 * properly and needs SW to track and update the LRU state.  This
 * is specific to an errata on e300c2/c3/c4 class parts
 */
#define MMU_FTR_NEED_DTLB_SW_LRU	ASM_CONST(0x00200000)

/* Enable use of TLB reservation.  Processor should support tlbsrx.
 * instruction and MAS0[WQ].
 * NOTE(review): bit 0x00400000 is unused between this and the entry above.
 */
#define MMU_FTR_USE_TLBRSRV		ASM_CONST(0x00800000)

/* Use paired MAS registers (MAS7||MAS3, etc.)
 */
#define MMU_FTR_USE_PAIRED_MAS		ASM_CONST(0x01000000)

/* Doesn't support the B bit (1T segment) in SLBIE
 */
#define MMU_FTR_NO_SLBIE_B		ASM_CONST(0x02000000)

/* Support 16M large pages
 */
#define MMU_FTR_16M_PAGE		ASM_CONST(0x04000000)

/* Supports TLBIEL variant
 */
#define MMU_FTR_TLBIEL			ASM_CONST(0x08000000)

/* Supports tlbies w/o locking
 */
#define MMU_FTR_LOCKLESS_TLBIE		ASM_CONST(0x10000000)

/* Large pages can be marked CI (cache inhibited)
 */
#define MMU_FTR_CI_LARGE_PAGE		ASM_CONST(0x20000000)

/* 1T segments available
 */
#define MMU_FTR_1T_SEGMENT		ASM_CONST(0x40000000)

/*
 * Supports KUAP (key 0 controlling userspace addresses) on radix
 */
#define MMU_FTR_RADIX_KUAP		ASM_CONST(0x80000000)
127*4882a593Smuzhiyun 
/* MMU feature bit sets for various CPUs.
 *
 * NOTE(review): MMU_FTR_PPCAS_ARCH_V2 is not defined in this header —
 * presumably it comes from <asm/cputable.h>; confirm when changing.
 * These macros expand unparenthesized, so use them only as plain
 * initializer values, never inside a larger expression.
 */
#define MMU_FTRS_DEFAULT_HPTE_ARCH_V2	\
	MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2
#define MMU_FTRS_POWER		MMU_FTRS_DEFAULT_HPTE_ARCH_V2
#define MMU_FTRS_PPC970		MMU_FTRS_POWER | MMU_FTR_TLBIE_CROP_VA
#define MMU_FTRS_POWER5		MMU_FTRS_POWER | MMU_FTR_LOCKLESS_TLBIE
#define MMU_FTRS_POWER6		MMU_FTRS_POWER5 | MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA
#define MMU_FTRS_POWER7		MMU_FTRS_POWER6
#define MMU_FTRS_POWER8		MMU_FTRS_POWER6
#define MMU_FTRS_POWER9		MMU_FTRS_POWER6
#define MMU_FTRS_POWER10	MMU_FTRS_POWER6
#define MMU_FTRS_CELL		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
				MMU_FTR_CI_LARGE_PAGE
#define MMU_FTRS_PA6T		MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
				MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
143*4882a593Smuzhiyun #ifndef __ASSEMBLY__
144*4882a593Smuzhiyun #include <linux/bug.h>
145*4882a593Smuzhiyun #include <asm/cputable.h>
146*4882a593Smuzhiyun #include <asm/page.h>
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun typedef pte_t *pgtable_t;
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun #ifdef CONFIG_PPC_FSL_BOOK3E
151*4882a593Smuzhiyun #include <asm/percpu.h>
152*4882a593Smuzhiyun DECLARE_PER_CPU(int, next_tlbcam_idx);
153*4882a593Smuzhiyun #endif
154*4882a593Smuzhiyun 
/*
 * MMU_FTRS_POSSIBLE is the union of every MMU feature bit this kernel
 * configuration could ever see. mmu_has_feature() masks against it, so
 * tests for configured-out features constant-fold to false.
 *
 * Fixes vs. previous revision: the #endif closing CONFIG_PPC_KUEP was
 * mislabelled "CONFIG_PPC_KUAP", and the PKEY/KUEP entries used a single
 * tab where every sibling entry uses two.
 */
enum {
	MMU_FTRS_POSSIBLE =
#ifdef CONFIG_PPC_BOOK3S
		MMU_FTR_HPTE_TABLE |
#endif
#ifdef CONFIG_PPC_8xx
		MMU_FTR_TYPE_8xx |
#endif
#ifdef CONFIG_40x
		MMU_FTR_TYPE_40x |
#endif
#ifdef CONFIG_44x
		MMU_FTR_TYPE_44x |
#endif
#if defined(CONFIG_E200) || defined(CONFIG_E500)
		MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | MMU_FTR_USE_TLBILX |
#endif
#ifdef CONFIG_PPC_47x
		MMU_FTR_TYPE_47x | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL |
#endif
#ifdef CONFIG_PPC_BOOK3S_32
		MMU_FTR_USE_HIGH_BATS | MMU_FTR_NEED_DTLB_SW_LRU |
#endif
#ifdef CONFIG_PPC_BOOK3E_64
		MMU_FTR_USE_TLBRSRV | MMU_FTR_USE_PAIRED_MAS |
#endif
#ifdef CONFIG_PPC_BOOK3S_64
		MMU_FTR_NO_SLBIE_B | MMU_FTR_16M_PAGE | MMU_FTR_TLBIEL |
		MMU_FTR_LOCKLESS_TLBIE | MMU_FTR_CI_LARGE_PAGE |
		MMU_FTR_1T_SEGMENT | MMU_FTR_TLBIE_CROP_VA |
		MMU_FTR_KERNEL_RO | MMU_FTR_68_BIT_VA |
#endif
#ifdef CONFIG_PPC_RADIX_MMU
		MMU_FTR_TYPE_RADIX |
		MMU_FTR_GTSE |
#ifdef CONFIG_PPC_KUAP
		MMU_FTR_RADIX_KUAP |
#endif /* CONFIG_PPC_KUAP */
#endif /* CONFIG_PPC_RADIX_MMU */
#ifdef CONFIG_PPC_MEM_KEYS
		MMU_FTR_PKEY |
#endif
#ifdef CONFIG_PPC_KUEP
		MMU_FTR_KUEP |
#endif /* CONFIG_PPC_KUEP */

		0,
};
203*4882a593Smuzhiyun 
early_mmu_has_feature(unsigned long feature)204*4882a593Smuzhiyun static inline bool early_mmu_has_feature(unsigned long feature)
205*4882a593Smuzhiyun {
206*4882a593Smuzhiyun 	return !!(MMU_FTRS_POSSIBLE & cur_cpu_spec->mmu_features & feature);
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun #ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
210*4882a593Smuzhiyun #include <linux/jump_label.h>
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun #define NUM_MMU_FTR_KEYS	32
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun extern struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS];
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun extern void mmu_feature_keys_init(void);
217*4882a593Smuzhiyun 
/*
 * Test an MMU feature bit using static keys (jump labels), so the check
 * compiles down to a patched branch after mmu_feature_keys_init().
 * @feature must be a compile-time constant with a single bit set.
 */
static __always_inline bool mmu_has_feature(unsigned long feature)
{
	int i;

#ifndef __clang__ /* clang can't cope with this */
	BUILD_BUG_ON(!__builtin_constant_p(feature));
#endif

#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECK_DEBUG
	/* Keys aren't patched yet: warn and fall back to the cpu_spec word. */
	if (!static_key_initialized) {
		printk("Warning! mmu_has_feature() used prior to jump label init!\n");
		dump_stack();
		return early_mmu_has_feature(feature);
	}
#endif

	/* Constant-folds to false for features configured out of this build. */
	if (!(MMU_FTRS_POSSIBLE & feature))
		return false;

	/* One static key per feature bit; the key index is the bit number. */
	i = __builtin_ctzl(feature);
	return static_branch_likely(&mmu_feature_keys[i]);
}
240*4882a593Smuzhiyun 
mmu_clear_feature(unsigned long feature)241*4882a593Smuzhiyun static inline void mmu_clear_feature(unsigned long feature)
242*4882a593Smuzhiyun {
243*4882a593Smuzhiyun 	int i;
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun 	i = __builtin_ctzl(feature);
246*4882a593Smuzhiyun 	cur_cpu_spec->mmu_features &= ~feature;
247*4882a593Smuzhiyun 	static_branch_disable(&mmu_feature_keys[i]);
248*4882a593Smuzhiyun }
249*4882a593Smuzhiyun #else
250*4882a593Smuzhiyun 
/* No jump-label feature checks in this config: nothing to initialize. */
static inline void mmu_feature_keys_init(void)
{

}
255*4882a593Smuzhiyun 
/* Without jump labels, every feature test reads the cpu_spec word. */
static inline bool mmu_has_feature(unsigned long feature)
{
	return early_mmu_has_feature(feature);
}
260*4882a593Smuzhiyun 
/* Without jump labels there is no static key to disable; just clear the bit. */
static inline void mmu_clear_feature(unsigned long feature)
{
	cur_cpu_spec->mmu_features &= ~feature;
}
#endif /* CONFIG_JUMP_LABEL_FEATURE_CHECKS */
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun #ifdef CONFIG_PPC64
/* This is our real memory area size on ppc64 server, on embedded, we
 * make it match the size of our bolted TLB area
 */
273*4882a593Smuzhiyun extern u64 ppc64_rma_size;
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun /* Cleanup function used by kexec */
276*4882a593Smuzhiyun extern void mmu_cleanup_all(void);
277*4882a593Smuzhiyun extern void radix__mmu_cleanup_all(void);
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun /* Functions for creating and updating partition table on POWER9 */
280*4882a593Smuzhiyun extern void mmu_partition_table_init(void);
281*4882a593Smuzhiyun extern void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
282*4882a593Smuzhiyun 					  unsigned long dw1, bool flush);
283*4882a593Smuzhiyun #endif /* CONFIG_PPC64 */
284*4882a593Smuzhiyun 
285*4882a593Smuzhiyun struct mm_struct;
286*4882a593Smuzhiyun #ifdef CONFIG_DEBUG_VM
287*4882a593Smuzhiyun extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
288*4882a593Smuzhiyun #else /* CONFIG_DEBUG_VM */
/* No-op when CONFIG_DEBUG_VM is off; the real check lives in pgtable code. */
static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
{
}
292*4882a593Smuzhiyun #endif /* !CONFIG_DEBUG_VM */
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun #ifdef CONFIG_PPC_RADIX_MMU
/* True when the radix MMU is supported and currently in use. */
static inline bool radix_enabled(void)
{
	return mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
299*4882a593Smuzhiyun 
/* radix_enabled() variant safe to call before jump label init. */
static inline bool early_radix_enabled(void)
{
	return early_mmu_has_feature(MMU_FTR_TYPE_RADIX);
}
304*4882a593Smuzhiyun #else
/* Radix MMU not configured in: never enabled. */
static inline bool radix_enabled(void)
{
	return false;
}
309*4882a593Smuzhiyun 
/* Radix MMU not configured in: never enabled, even early in boot. */
static inline bool early_radix_enabled(void)
{
	return false;
}
314*4882a593Smuzhiyun #endif
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun #ifdef CONFIG_STRICT_KERNEL_RWX
/* Strict kernel RWX tracks the global rodata_enabled flag when built in. */
static inline bool strict_kernel_rwx_enabled(void)
{
	return rodata_enabled;
}
321*4882a593Smuzhiyun #else
/* CONFIG_STRICT_KERNEL_RWX is off: strict RWX is never enabled. */
static inline bool strict_kernel_rwx_enabled(void)
{
	return false;
}
326*4882a593Smuzhiyun #endif
327*4882a593Smuzhiyun #endif /* !__ASSEMBLY__ */
328*4882a593Smuzhiyun 
/* The kernel uses the constants below to index in the page sizes array.
 * The use of fixed constants for this purpose is better for performance
 * of the low level hash refill handlers.
 *
 * A non supported page size has a "shift" field set to 0
 *
 * Any new page size being implemented can get a new entry in here. Whether
 * the kernel will use it or not is a different matter though. The actual page
 * size used by hugetlbfs is not defined here and may be made variable
 *
 * Note: This array ended up being a false good idea as it's growing to the
 * point where I wonder if we should replace it with something different,
 * to think about, feedback welcome. --BenH.
 */

/* These are #defines as they have to be used in assembly */
#define MMU_PAGE_4K	0
#define MMU_PAGE_16K	1
#define MMU_PAGE_64K	2
#define MMU_PAGE_64K_AP	3	/* "Admixed pages" (hash64 only) */
#define MMU_PAGE_256K	4
#define MMU_PAGE_512K	5
#define MMU_PAGE_1M	6
#define MMU_PAGE_2M	7
#define MMU_PAGE_4M	8
#define MMU_PAGE_8M	9
#define MMU_PAGE_16M	10
#define MMU_PAGE_64M	11
#define MMU_PAGE_256M	12
#define MMU_PAGE_1G	13
#define MMU_PAGE_16G	14
#define MMU_PAGE_64G	15

/*
 * N.B. we need to change the type of hpte_page_sizes if this gets to be > 16
 * Also we need to change the type of mm_context.low/high_slices_psize.
 */
#define MMU_PAGE_COUNT	16
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun #ifdef CONFIG_PPC_BOOK3S_64
369*4882a593Smuzhiyun #include <asm/book3s/64/mmu.h>
370*4882a593Smuzhiyun #else /* CONFIG_PPC_BOOK3S_64 */
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun #ifndef __ASSEMBLY__
373*4882a593Smuzhiyun /* MMU initialization */
374*4882a593Smuzhiyun extern void early_init_mmu(void);
375*4882a593Smuzhiyun extern void early_init_mmu_secondary(void);
376*4882a593Smuzhiyun extern void setup_initial_memory_limit(phys_addr_t first_memblock_base,
377*4882a593Smuzhiyun 				       phys_addr_t first_memblock_size);
/* Non-Book3S-64: no MMU devtree setup needed. */
static inline void mmu_early_init_devtree(void) { }
379*4882a593Smuzhiyun 
/* Non-Book3S-64: memory protection keys are not supported, nothing to do. */
static inline void pkey_early_init_devtree(void) {}
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun extern void *abatron_pteptrs[2];
383*4882a593Smuzhiyun #endif /* __ASSEMBLY__ */
384*4882a593Smuzhiyun #endif
385*4882a593Smuzhiyun 
386*4882a593Smuzhiyun #if defined(CONFIG_PPC_BOOK3S_32)
387*4882a593Smuzhiyun /* 32-bit classic hash table MMU */
388*4882a593Smuzhiyun #include <asm/book3s/32/mmu-hash.h>
389*4882a593Smuzhiyun #elif defined(CONFIG_PPC_MMU_NOHASH)
390*4882a593Smuzhiyun #include <asm/nohash/mmu.h>
391*4882a593Smuzhiyun #endif
392*4882a593Smuzhiyun 
393*4882a593Smuzhiyun #endif /* __KERNEL__ */
394*4882a593Smuzhiyun #endif /* _ASM_POWERPC_MMU_H_ */
395