1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef _ASM_POWERPC_MMU_8XX_H_
3*4882a593Smuzhiyun #define _ASM_POWERPC_MMU_8XX_H_
4*4882a593Smuzhiyun /*
5*4882a593Smuzhiyun * PPC8xx support
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
/* Control/status registers for the MPC8xx.
 * A write operation to these registers causes serialized access.
 * During software tablewalk, the registers used perform mask/shift-add
 * operations when written/read. A TLB entry is created when the Mx_RPN
 * is written, and the contents of several registers are used to
 * create the entry.
 */
#define SPRN_MI_CTR	784	/* Instruction TLB control register */
#define MI_GPM		0x80000000	/* Set domain manager mode */
#define MI_PPM		0x40000000	/* Set subpage protection */
#define MI_CIDEF	0x20000000	/* Set cache inhibit when MMU dis */
#define MI_RSV4I	0x08000000	/* Reserve 4 TLB entries */
#define MI_PPCS		0x02000000	/* Use MI_RPN prob/priv state */
#define MI_IDXMASK	0x00001f00	/* TLB index to be loaded */

/* These are the Ks and Kp from the PowerPC books.  For proper operation,
 * Ks = 0, Kp = 1.
 */
#define SPRN_MI_AP	786
#define MI_Ks		0x80000000	/* Should not be set */
#define MI_Kp		0x40000000	/* Should always be set */

/*
 * All pages' PP data bits are set to either 001 or 011 by copying _PAGE_EXEC
 * into bit 21 in the ITLBmiss handler (bit 21 is the middle bit), which means
 * respectively NA for All or X for Supervisor and no access for User.
 * Then we use the APG to say whether accesses are according to Page rules or
 * "all Supervisor" rules (Access to all)
 * _PAGE_ACCESSED is also managed via APG. When _PAGE_ACCESSED is not set, say
 * "all User" rules, that will lead to NA for all.
 * Therefore, we define 4 APG groups. lsb is _PAGE_ACCESSED
 * 0 => Kernel => 11 (all accesses performed according as user iaw page definition)
 * 1 => Kernel+Accessed => 01 (all accesses performed according to page definition)
 * 2 => User => 11 (all accesses performed according as user iaw page definition)
 * 3 => User+Accessed => 00 (all accesses performed as supervisor iaw page definition) for INIT
 *                   => 10 (all accesses performed according to swapped page definition) for KUEP
 * 4-15 => Not Used
 */
#define MI_APG_INIT	0xdc000000
#define MI_APG_KUEP	0xde000000

/* The effective page number register.  When read, contains the information
 * about the last instruction TLB miss.  When MI_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
#define SPRN_MI_EPN	787
#define MI_EPNMASK	0xfffff000	/* Effective page number for entry */
#define MI_EVALID	0x00000200	/* Entry is valid */
#define MI_ASIDMASK	0x0000000f	/* ASID match value */
/* Reset value is undefined */

/* A "level 1" or "segment" or whatever you want to call it register.
 * For the instruction TLB, it contains bits that get loaded into the
 * TLB entry when the MI_RPN is written.
 */
#define SPRN_MI_TWC	789
#define MI_APG		0x000001e0	/* Access protection group (0) */
#define MI_GUARDED	0x00000010	/* Guarded storage */
#define MI_PSMASK	0x0000000c	/* Mask of page size bits */
#define MI_PS8MEG	0x0000000c	/* 8M page size */
#define MI_PS512K	0x00000004	/* 512K page size */
#define MI_PS4K_16K	0x00000000	/* 4K or 16K page size */
#define MI_SVALID	0x00000001	/* Segment entry is valid */
/* Reset value is undefined */

/* Real page number.  Defined by the pte.  Writing this register
 * causes a TLB entry to be created for the instruction TLB, using
 * additional information from the MI_EPN, and MI_TWC registers.
 */
#define SPRN_MI_RPN	790
#define MI_SPS16K	0x00000008	/* Small page size (0 = 4k, 1 = 16k) */

/* Define an RPN value for mapping kernel memory to large virtual
 * pages for boot initialization.  This has real page number of 0,
 * large page size, shared page, cache enabled, and valid.
 * Also mark all subpages valid and write access.
 */
#define MI_BOOTINIT	0x000001fd
#define SPRN_MD_CTR	792	/* Data TLB control register */
#define MD_GPM		0x80000000	/* Set domain manager mode */
#define MD_PPM		0x40000000	/* Set subpage protection */
#define MD_CIDEF	0x20000000	/* Set cache inhibit when MMU dis */
#define MD_WTDEF	0x10000000	/* Set writethrough when MMU dis */
#define MD_RSV4I	0x08000000	/* Reserve 4 TLB entries */
#define MD_TWAM		0x04000000	/* Use 4K page hardware assist */
#define MD_PPCS		0x02000000	/* Use MI_RPN prob/priv state */
#define MD_IDXMASK	0x00001f00	/* TLB index to be loaded */

#define SPRN_M_CASID	793	/* Address space ID (context) to match */
#define MC_ASIDMASK	0x0000000f	/* Bits used for ASID value */


/* These are the Ks and Kp from the PowerPC books.  For proper operation,
 * Ks = 0, Kp = 1.
 */
#define SPRN_MD_AP	794
#define MD_Ks		0x80000000	/* Should not be set */
#define MD_Kp		0x40000000	/* Should always be set */

/* See explanation above at the definition of MI_APG_INIT */
#define MD_APG_INIT	0xdc000000
#define MD_APG_KUAP	0xde000000

/* The effective page number register.  When read, contains the information
 * about the last data TLB miss.  When MD_RPN is written, bits in
 * this register are used to create the TLB entry.
 */
#define SPRN_MD_EPN	795
#define MD_EPNMASK	0xfffff000	/* Effective page number for entry */
#define MD_EVALID	0x00000200	/* Entry is valid */
#define MD_ASIDMASK	0x0000000f	/* ASID match value */
/* Reset value is undefined */

/* The pointer to the base address of the first level page table.
 * During a software tablewalk, reading this register provides the address
 * of the entry associated with MD_EPN.
 */
#define SPRN_M_TWB	796
#define M_L1TB		0xfffff000	/* Level 1 table base address */
#define M_L1INDX	0x00000ffc	/* Level 1 index, when read */
/* Reset value is undefined */

/* A "level 1" or "segment" or whatever you want to call it register.
 * For the data TLB, it contains bits that get loaded into the TLB entry
 * when the MD_RPN is written.  It also provides the hardware assist
 * for finding the PTE address during software tablewalk.
 */
#define SPRN_MD_TWC	797
#define MD_L2TB		0xfffff000	/* Level 2 table base address */
#define MD_L2INDX	0xfffffe00	/* Level 2 index (*pte), when read */
#define MD_APG		0x000001e0	/* Access protection group (0) */
#define MD_GUARDED	0x00000010	/* Guarded storage */
#define MD_PSMASK	0x0000000c	/* Mask of page size bits */
#define MD_PS8MEG	0x0000000c	/* 8M page size */
#define MD_PS512K	0x00000004	/* 512K page size */
#define MD_PS4K_16K	0x00000000	/* 4K or 16K page size */
#define MD_WT		0x00000002	/* Use writethrough page attribute */
#define MD_SVALID	0x00000001	/* Segment entry is valid */
/* Reset value is undefined */


/* Real page number.  Defined by the pte.  Writing this register
 * causes a TLB entry to be created for the data TLB, using
 * additional information from the MD_EPN, and MD_TWC registers.
 */
#define SPRN_MD_RPN	798
#define MD_SPS16K	0x00000008	/* Small page size (0 = 4k, 1 = 16k) */

/* This is a temporary storage register that could be used to save
 * a processor working register during a tablewalk.
 */
#define SPRN_M_TW	799
162*4882a593Smuzhiyun #if defined(CONFIG_PPC_4K_PAGES)
163*4882a593Smuzhiyun #define mmu_virtual_psize MMU_PAGE_4K
164*4882a593Smuzhiyun #elif defined(CONFIG_PPC_16K_PAGES)
165*4882a593Smuzhiyun #define mmu_virtual_psize MMU_PAGE_16K
166*4882a593Smuzhiyun #define PTE_FRAG_NR 4
167*4882a593Smuzhiyun #define PTE_FRAG_SIZE_SHIFT 12
168*4882a593Smuzhiyun #define PTE_FRAG_SIZE (1UL << 12)
169*4882a593Smuzhiyun #else
170*4882a593Smuzhiyun #error "Unsupported PAGE_SIZE"
171*4882a593Smuzhiyun #endif
172*4882a593Smuzhiyun
173*4882a593Smuzhiyun #define mmu_linear_psize MMU_PAGE_8M
174*4882a593Smuzhiyun
175*4882a593Smuzhiyun #ifndef __ASSEMBLY__
176*4882a593Smuzhiyun
177*4882a593Smuzhiyun #include <linux/mmdebug.h>
178*4882a593Smuzhiyun
179*4882a593Smuzhiyun void mmu_pin_tlb(unsigned long top, bool readonly);
180*4882a593Smuzhiyun
/* Per-mm MMU context for the 8xx. */
typedef struct {
	unsigned int id;		/* context number (ASID source? — confirm against context code) */
	unsigned int active;		/* non-zero while the context is in use */
	unsigned long vdso_base;	/* user address of the vDSO mapping */
	void *pte_frag;			/* current PTE fragment page, NULL if none */
} mm_context_t;

/* Physical/virtual base of the internal memory map (IMMR), 512K aligned. */
#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000)
#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE))

/* Page size definitions, common between 32 and 64-bit
 *
 * shift : is the "PAGE_SHIFT" value for that page size
 * enc   : is the PTE encoding for that page size
 *
 */
197*4882a593Smuzhiyun struct mmu_psize_def {
198*4882a593Smuzhiyun unsigned int shift; /* number of bits */
199*4882a593Smuzhiyun unsigned int enc; /* PTE encoding */
200*4882a593Smuzhiyun unsigned int ind; /* Corresponding indirect page size shift */
201*4882a593Smuzhiyun unsigned int flags;
202*4882a593Smuzhiyun #define MMU_PAGE_SIZE_DIRECT 0x1 /* Supported as a direct size */
203*4882a593Smuzhiyun #define MMU_PAGE_SIZE_INDIRECT 0x2 /* Supported as an indirect size */
204*4882a593Smuzhiyun };
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
207*4882a593Smuzhiyun
shift_to_mmu_psize(unsigned int shift)208*4882a593Smuzhiyun static inline int shift_to_mmu_psize(unsigned int shift)
209*4882a593Smuzhiyun {
210*4882a593Smuzhiyun int psize;
211*4882a593Smuzhiyun
212*4882a593Smuzhiyun for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
213*4882a593Smuzhiyun if (mmu_psize_defs[psize].shift == shift)
214*4882a593Smuzhiyun return psize;
215*4882a593Smuzhiyun return -1;
216*4882a593Smuzhiyun }
217*4882a593Smuzhiyun
mmu_psize_to_shift(unsigned int mmu_psize)218*4882a593Smuzhiyun static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
219*4882a593Smuzhiyun {
220*4882a593Smuzhiyun if (mmu_psize_defs[mmu_psize].shift)
221*4882a593Smuzhiyun return mmu_psize_defs[mmu_psize].shift;
222*4882a593Smuzhiyun BUG();
223*4882a593Smuzhiyun }
224*4882a593Smuzhiyun
225*4882a593Smuzhiyun /* patch sites */
226*4882a593Smuzhiyun extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1;
227*4882a593Smuzhiyun extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf;
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun #endif /* !__ASSEMBLY__ */
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun #endif /* _ASM_POWERPC_MMU_8XX_H_ */
232