/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
#define _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_
/*
 * PowerPC64 memory management structures
 *
 * Dave Engebretsen & Mike Corrigan <{engebret|mikejc}@us.ibm.com>
 *   PPC64 rework.
 */

#include <asm/page.h>
#include <asm/bug.h>
#include <asm/asm-const.h>

/*
 * This is necessary to get the definition of PGTABLE_RANGE which we
 * need for various slices related matters. Note that this isn't the
 * complete pgtable.h but only a portion of it.
 */
#include <asm/book3s/64/pgtable.h>
#include <asm/task_size_64.h>
#include <asm/cpu_has_feature.h>

/*
 * SLB
 */

#define SLB_NUM_BOLTED		2
#define SLB_CACHE_ENTRIES	8
#define SLB_MIN_SIZE		32

/* Bits in the SLB ESID word */
#define SLB_ESID_V		ASM_CONST(0x0000000008000000) /* valid */

/* Bits in the SLB VSID word */
#define SLB_VSID_SHIFT		12
#define SLB_VSID_SHIFT_256M	SLB_VSID_SHIFT
#define SLB_VSID_SHIFT_1T	24
#define SLB_VSID_SSIZE_SHIFT	62
#define SLB_VSID_B		ASM_CONST(0xc000000000000000)
#define SLB_VSID_B_256M		ASM_CONST(0x0000000000000000)
#define SLB_VSID_B_1T		ASM_CONST(0x4000000000000000)
#define SLB_VSID_KS		ASM_CONST(0x0000000000000800)
#define SLB_VSID_KP		ASM_CONST(0x0000000000000400)
#define SLB_VSID_N		ASM_CONST(0x0000000000000200) /* no-execute */
#define SLB_VSID_L		ASM_CONST(0x0000000000000100)
#define SLB_VSID_C		ASM_CONST(0x0000000000000080) /* class */
#define SLB_VSID_LP		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LP_00		ASM_CONST(0x0000000000000000)
#define SLB_VSID_LP_01		ASM_CONST(0x0000000000000010)
#define SLB_VSID_LP_10		ASM_CONST(0x0000000000000020)
#define SLB_VSID_LP_11		ASM_CONST(0x0000000000000030)
#define SLB_VSID_LLP		(SLB_VSID_L|SLB_VSID_LP)

#define SLB_VSID_KERNEL		(SLB_VSID_KP)
#define SLB_VSID_USER		(SLB_VSID_KP|SLB_VSID_KS|SLB_VSID_C)

#define SLBIE_C			(0x08000000)
#define SLBIE_SSIZE_SHIFT	25

/*
 * Hash table
 */

#define HPTES_PER_GROUP 8

#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_AVPN_SHIFT	7
#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
#define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
#define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
#define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
#define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
#define HPTE_V_LOCK		ASM_CONST(0x0000000000000008)
#define HPTE_V_LARGE		ASM_CONST(0x0000000000000004)
#define HPTE_V_SECONDARY	ASM_CONST(0x0000000000000002)
#define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

/*
 * ISA 3.0 has a different HPTE format.
 */
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
#define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
#define HPTE_R_TS		ASM_CONST(0x4000000000000000)
#define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
#define HPTE_R_KEY_BIT4		ASM_CONST(0x2000000000000000)
#define HPTE_R_KEY_BIT3		ASM_CONST(0x1000000000000000)
#define HPTE_R_RPN_SHIFT	12
#define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
#define HPTE_R_PP		ASM_CONST(0x0000000000000003)
#define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
#define HPTE_R_N		ASM_CONST(0x0000000000000004)
#define HPTE_R_G		ASM_CONST(0x0000000000000008)
#define HPTE_R_M		ASM_CONST(0x0000000000000010)
#define HPTE_R_I		ASM_CONST(0x0000000000000020)
#define HPTE_R_W		ASM_CONST(0x0000000000000040)
#define HPTE_R_WIMG		ASM_CONST(0x0000000000000078)
#define HPTE_R_C		ASM_CONST(0x0000000000000080)
#define HPTE_R_R		ASM_CONST(0x0000000000000100)
#define HPTE_R_KEY_LO		ASM_CONST(0x0000000000000e00)
#define HPTE_R_KEY_BIT2		ASM_CONST(0x0000000000000800)
#define HPTE_R_KEY_BIT1		ASM_CONST(0x0000000000000400)
#define HPTE_R_KEY_BIT0		ASM_CONST(0x0000000000000200)
#define HPTE_R_KEY		(HPTE_R_KEY_LO | HPTE_R_KEY_HI)

#define HPTE_V_1TB_SEG		ASM_CONST(0x4000000000000000)
#define HPTE_V_VRMA_MASK	ASM_CONST(0x4001ffffff000000)

/* Values for PP (assumes Ks=0, Kp=1) */
#define PP_RWXX	0	/* Supervisor read/write, User none */
#define PP_RWRX 1	/* Supervisor read/write, User read */
#define PP_RWRW 2	/* Supervisor read/write, User read/write */
#define PP_RXRX 3	/* Supervisor read,       User read */
#define PP_RXXX	(HPTE_R_PP0 | 2)	/* Supervisor read, user none */

/* Fields for tlbiel instruction in architecture 2.06 */
#define TLBIEL_INVAL_SEL_MASK	0xc00	/* invalidation selector */
#define  TLBIEL_INVAL_PAGE	0x000	/* invalidate a single page */
#define  TLBIEL_INVAL_SET_LPID	0x800	/* invalidate a set for current LPID */
#define  TLBIEL_INVAL_SET	0xc00	/* invalidate a set for all LPIDs */
#define TLBIEL_INVAL_SET_MASK	0xfff000	/* set number to inval. */
#define TLBIEL_INVAL_SET_SHIFT	12

#define POWER7_TLB_SETS		128	/* # sets in POWER7 TLB */
#define POWER8_TLB_SETS		512	/* # sets in POWER8 TLB */
#define POWER9_TLB_SETS_HASH	256	/* # sets in POWER9 TLB Hash mode */
#define POWER9_TLB_SETS_RADIX	128	/* # sets in POWER9 TLB Radix mode */
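
/*
 * Illustrative sketch (not kernel API): a hash-mode flush-all loop would
 * compose the tlbiel RB operand from the fields above, here invalidating
 * one congruence class (set) per iteration for all LPIDs:
 *
 *	for (set = 0; set < POWER9_TLB_SETS_HASH; set++) {
 *		rb = TLBIEL_INVAL_SET | (set << TLBIEL_INVAL_SET_SHIFT);
 *		asm volatile("tlbiel %0" : : "r" (rb) : "memory");
 *	}
 */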

#ifndef __ASSEMBLY__

struct mmu_hash_ops {
	void		(*hpte_invalidate)(unsigned long slot,
					   unsigned long vpn,
					   int bpsize, int apsize,
					   int ssize, int local);
	long		(*hpte_updatepp)(unsigned long slot,
					 unsigned long newpp,
					 unsigned long vpn,
					 int bpsize, int apsize,
					 int ssize, unsigned long flags);
	void		(*hpte_updateboltedpp)(unsigned long newpp,
					       unsigned long ea,
					       int psize, int ssize);
	long		(*hpte_insert)(unsigned long hpte_group,
				       unsigned long vpn,
				       unsigned long prpn,
				       unsigned long rflags,
				       unsigned long vflags,
				       int psize, int apsize,
				       int ssize);
	long		(*hpte_remove)(unsigned long hpte_group);
	int		(*hpte_removebolted)(unsigned long ea,
					     int psize, int ssize);
	void		(*flush_hash_range)(unsigned long number, int local);
	void		(*hugepage_invalidate)(unsigned long vsid,
					       unsigned long addr,
					       unsigned char *hpte_slot_array,
					       int psize, int ssize, int local);
	int		(*resize_hpt)(unsigned long shift);
	/*
	 * Special for kexec.
	 * To be called in real mode with interrupts disabled. No locks are
	 * taken, so concurrent access on pre-POWER5 hardware could result
	 * in a deadlock.
	 * The linear mapping is destroyed as well.
	 */
	void		(*hpte_clear_all)(void);
};
extern struct mmu_hash_ops mmu_hash_ops;

struct hash_pte {
	__be64 v;
	__be64 r;
};

extern struct hash_pte *htab_address;
extern unsigned long htab_size_bytes;
extern unsigned long htab_hash_mask;


static inline int shift_to_mmu_psize(unsigned int shift)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize)
		if (mmu_psize_defs[psize].shift == shift)
			return psize;
	return -1;
}

static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize)
{
	if (mmu_psize_defs[mmu_psize].shift)
		return mmu_psize_defs[mmu_psize].shift;
	BUG();
}

static inline unsigned int ap_to_shift(unsigned long ap)
{
	int psize;

	for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
		if (mmu_psize_defs[psize].ap == ap)
			return mmu_psize_defs[psize].shift;
	}

	return -1;
}

static inline unsigned long get_sllp_encoding(int psize)
{
	unsigned long sllp;

	sllp = ((mmu_psize_defs[psize].sllp & SLB_VSID_L) >> 6) |
		((mmu_psize_defs[psize].sllp & SLB_VSID_LP) >> 4);
	return sllp;
}

#endif /* __ASSEMBLY__ */

/*
 * Segment sizes.
 * These are the values used by hardware in the B field of
 * SLB entries and the first dword of MMU hashtable entries.
 * The B field is 2 bits; the values 2 and 3 are unused and reserved.
 */
#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

/*
 * Encode page number shift.
 * In order to fit the 78 bit VA in a 64 bit variable we shift the VA by
 * 12 bits. This enables us to address up to 76 bit VA.
 * For HPT hash from a VA we can ignore the page size bits of the VA and
 * for HPTE encoding we ignore up to 23 bits of the VA. So ignoring the
 * lower 12 bits ensures we work in all cases including 4k page size.
 */
#define VPN_SHIFT	12

/*
 * HPTE Large Page (LP) details
 */
#define LP_SHIFT	12
#define LP_BITS		8
#define LP_MASK(i)	((0xFF >> (i)) << LP_SHIFT)

#ifndef __ASSEMBLY__

static inline int slb_vsid_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SLB_VSID_SHIFT;
	return SLB_VSID_SHIFT_1T;
}

static inline int segment_shift(int ssize)
{
	if (ssize == MMU_SEGSIZE_256M)
		return SID_SHIFT;
	return SID_SHIFT_1T;
}

/*
 * This array is indexed by the LP field of the HPTE second dword.
 * Since this field may contain some RPN bits, some entries are
 * replicated so that we get the same value irrespective of RPN.
 * The top 4 bits are the page size index (MMU_PAGE_*) for the
 * actual page size, the bottom 4 bits are the base page size.
 */
extern u8 hpte_page_sizes[1 << LP_BITS];

static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
					     bool is_base_size)
{
	unsigned int i, lp;

	if (!(h & HPTE_V_LARGE))
		return 1ul << 12;

	/* Look at the 8 bit LP value */
	lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
	i = hpte_page_sizes[lp];
	if (!i)
		return 0;
	if (!is_base_size)
		i >>= 4;
	return 1ul << mmu_psize_defs[i & 0xf].shift;
}

static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 0);
}

static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
{
	return __hpte_page_size(h, l, 1);
}
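
/*
 * Usage sketch (illustrative only): decoding the page sizes of an HPTE
 * read back from the hash table; for a large-page entry both helpers
 * return 0 if the LP value is not recognised in hpte_page_sizes[]:
 *
 *	unsigned long v = be64_to_cpu(hptep->v);
 *	unsigned long r = be64_to_cpu(hptep->r);
 *	unsigned long actual_size = hpte_page_size(v, r);
 *	unsigned long base_size   = hpte_base_page_size(v, r);
 */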

/*
 * The current system page and segment sizes
 */
extern int mmu_kernel_ssize;
extern int mmu_highuser_ssize;
extern u16 mmu_slb_size;
extern unsigned long tce_alloc_start, tce_alloc_end;

/*
 * If the processor supports 64k normal pages but not 64k cache
 * inhibited pages, we have to be prepared to switch processes
 * to use 4k pages when they create cache-inhibited mappings.
 * If this is the case, mmu_ci_restrictions will be set to 1.
 */
extern int mmu_ci_restrictions;

/*
 * This computes the AVPN and B fields of the first dword of a HPTE,
 * for use when we want to match an existing PTE. The bottom 7 bits
 * of the returned value are zero.
 */
static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
					     int ssize)
{
	unsigned long v;
	/*
	 * The AVA field omits the low-order 23 bits of the 78-bit VA.
	 * These bits are not needed in the PTE, because the
	 * low-order b of these bits are part of the byte offset
	 * into the virtual page and, if b < 23, the high-order
	 * 23-b of these bits are always used in selecting the
	 * PTEGs to be searched.
	 */
	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
	v <<= HPTE_V_AVPN_SHIFT;
	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
	return v;
}

/*
 * ISA v3.0 defines a new HPTE format, which differs from the old
 * format in having smaller AVPN and ARPN fields, and the B field
 * in the second dword instead of the first.
 */
static inline unsigned long hpte_old_to_new_v(unsigned long v)
{
	/* trim AVPN, drop B */
	return v & HPTE_V_COMMON_BITS;
}

static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	/* move B field from 1st to 2nd dword, trim ARPN */
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
		(((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	/* insert B field */
	return (v & HPTE_V_COMMON_BITS) |
		((r & HPTE_R_3_0_SSIZE_MASK) <<
		 (HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static inline unsigned long hpte_new_to_old_r(unsigned long r)
{
	/* clear out B field */
	return r & ~HPTE_R_3_0_SSIZE_MASK;
}
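
/*
 * Usage sketch (illustrative only): an insert path on ISA v3.0 hardware
 * converts the assembled old-format entry before writing it out. The
 * second dword must be converted first, since hpte_old_to_new_r() needs
 * the B field still present in the old first dword:
 *
 *	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
 *		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
 *		hpte_v = hpte_old_to_new_v(hpte_v);
 *	}
 */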

static inline unsigned long hpte_get_old_v(struct hash_pte *hptep)
{
	unsigned long hpte_v;

	hpte_v = be64_to_cpu(hptep->v);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
	return hpte_v;
}

/*
 * This function sets the AVPN and L fields of the HPTE appropriately
 * using the base page size and actual page size.
 */
static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
					  int actual_psize, int ssize)
{
	unsigned long v;
	v = hpte_encode_avpn(vpn, base_psize, ssize);
	if (actual_psize != MMU_PAGE_4K)
		v |= HPTE_V_LARGE;
	return v;
}

/*
 * This function sets the ARPN and LP fields of the HPTE appropriately
 * for the page size. We assume the pa is already "clean", that is,
 * properly aligned for the requested page size.
 */
static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
					  int actual_psize)
{
	/* A 4K page needs no special encoding */
	if (actual_psize == MMU_PAGE_4K)
		return pa & HPTE_R_RPN;
	else {
		unsigned int penc = mmu_psize_defs[base_psize].penc[actual_psize];
		unsigned int shift = mmu_psize_defs[actual_psize].shift;
		return (pa & ~((1ul << shift) - 1)) | (penc << LP_SHIFT);
	}
}
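
/*
 * Sketch (illustrative only): an insert path typically combines the two
 * encode helpers with caller-supplied flag bits to build a full HPTE:
 *
 *	hpte_v = hpte_encode_v(vpn, base_psize, actual_psize, ssize) |
 *		 vflags | HPTE_V_VALID;
 *	hpte_r = hpte_encode_r(pa, base_psize, actual_psize) | rflags;
 */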

/*
 * Build a VPN_SHIFT bit shifted va given VSID, EA and segment size.
 */
static inline unsigned long hpt_vpn(unsigned long ea,
				    unsigned long vsid, int ssize)
{
	unsigned long mask;
	int s_shift = segment_shift(ssize);

	mask = (1ul << (s_shift - VPN_SHIFT)) - 1;
	return (vsid << (s_shift - VPN_SHIFT)) | ((ea >> VPN_SHIFT) & mask);
}

/*
 * This hashes a virtual address
 */
static inline unsigned long hpt_hash(unsigned long vpn,
				     unsigned int shift, int ssize)
{
	unsigned long mask;
	unsigned long hash, vsid;

	/* VPN_SHIFT can be at most 12 */
	if (ssize == MMU_SEGSIZE_256M) {
		mask = (1ul << (SID_SHIFT - VPN_SHIFT)) - 1;
		hash = (vpn >> (SID_SHIFT - VPN_SHIFT)) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	} else {
		mask = (1ul << (SID_SHIFT_1T - VPN_SHIFT)) - 1;
		vsid = vpn >> (SID_SHIFT_1T - VPN_SHIFT);
		hash = vsid ^ (vsid << 25) ^
			((vpn & mask) >> (shift - VPN_SHIFT));
	}
	return hash & 0x7fffffffffUL;
}
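
/*
 * Usage sketch (illustrative only): the hash selects the primary PTE
 * group in the hash table; the secondary group is derived from the
 * one's complement of the hash:
 *
 *	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
 *	hpte_group = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 *	hpte_group_secondary = (~hash & htab_hash_mask) * HPTES_PER_GROUP;
 */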

#define HPTE_LOCAL_UPDATE	0x1
#define HPTE_NOHPTE_UPDATE	0x2

extern int __hash_page_4K(unsigned long ea, unsigned long access,
			  unsigned long vsid, pte_t *ptep, unsigned long trap,
			  unsigned long flags, int ssize, int subpage_prot);
extern int __hash_page_64K(unsigned long ea, unsigned long access,
			   unsigned long vsid, pte_t *ptep, unsigned long trap,
			   unsigned long flags, int ssize);
struct mm_struct;
unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
			unsigned long access, unsigned long trap,
			unsigned long flags);
extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
		     unsigned long dsisr);
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, unsigned long flags,
		     int ssize, unsigned int shift, unsigned int mmu_psize);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern int __hash_page_thp(unsigned long ea, unsigned long access,
			   unsigned long vsid, pmd_t *pmdp, unsigned long trap,
			   unsigned long flags, int ssize, unsigned int psize);
#else
static inline int __hash_page_thp(unsigned long ea, unsigned long access,
				  unsigned long vsid, pmd_t *pmdp,
				  unsigned long trap, unsigned long flags,
				  int ssize, unsigned int psize)
{
	BUG();
	return -1;
}
#endif
extern void hash_failure_debug(unsigned long ea, unsigned long access,
			       unsigned long vsid, unsigned long trap,
			       int ssize, int psize, int lpsize,
			       unsigned long pte);
extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
			     unsigned long pstart, unsigned long prot,
			     int psize, int ssize);
int htab_remove_mapping(unsigned long vstart, unsigned long vend,
			int psize, int ssize);
extern void pseries_add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages);
extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);

extern void hash__setup_new_exec(void);

#ifdef CONFIG_PPC_PSERIES
void hpte_init_pseries(void);
#else
static inline void hpte_init_pseries(void) { }
#endif

extern void hpte_init_native(void);

struct slb_entry {
	u64	esid;
	u64	vsid;
};

extern void slb_initialize(void);
void slb_flush_and_restore_bolted(void);
void slb_flush_all_realmode(void);
void __slb_restore_bolted_realmode(void);
void slb_restore_bolted_realmode(void);
void slb_save_contents(struct slb_entry *slb_ptr);
void slb_dump_contents(struct slb_entry *slb_ptr);

extern void slb_vmalloc_update(void);
extern void slb_set_size(u16 size);
#endif /* __ASSEMBLY__ */

/*
 * VSID allocation (256MB segment)
 *
 * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
 * from the mmu context id and the effective segment id of the address.
 *
 * For user processes the max context id is limited to MAX_USER_CONTEXT;
 * more details in get_user_context().
 *
 * For kernel space, see get_kernel_context().
 *
 * The proto-VSIDs are then scrambled into real VSIDs with the
 * multiplicative hash:
 *
 *	VSID = (proto-VSID * VSID_MULTIPLIER) % VSID_MODULUS
 *
 * VSID_MULTIPLIER is prime, so in particular it is
 * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
 * Because the modulus is 2^n-1 we can compute it efficiently without
 * a divide or extra multiply (see below). The scramble function gives
 * robust scattering in the hash table (at least based on some initial
 * results).
 *
 * We use VSID 0 to indicate an invalid VSID. This means we can't use
 * context id 0, because a context id of 0 and an EA of 0 gives a
 * proto-VSID of 0, which will produce a VSID of 0.
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in the vsid scramble.
 */
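
/*
 * Why no divide is needed: with M = 2^n - 1 we have 2^n == 1 (mod M),
 * so for any x:
 *
 *	x mod M == ((x >> n) + (x & M)) (mod M)
 *
 * One shift-and-add brings the product below 2^(n+1), and the final
 * "(x + ((x + 1) >> n)) & M" step in vsid_scramble() folds the one
 * remaining possible wrap, completing the reduction without a divide
 * or a second multiply.
 */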

/*
 * The max VA bits we support as of now is 68 bits. We want a 19-bit
 * context ID.
 * Restrictions:
 * The GPU has a restriction that it cannot access beyond 128TB
 * (47-bit effective address). We also cannot do more than a 20-bit PID.
 * For p4 and p5, which can only do 65-bit VA, we restrict our CONTEXT_BITS
 * to 16 bits (i.e., we can only have 2^16 pids at the same time).
 */
#define VA_BITS			68
#define CONTEXT_BITS		19
#define ESID_BITS		(VA_BITS - (SID_SHIFT + CONTEXT_BITS))
#define ESID_BITS_1T		(VA_BITS - (SID_SHIFT_1T + CONTEXT_BITS))

#define ESID_BITS_MASK		((1 << ESID_BITS) - 1)
#define ESID_BITS_1T_MASK	((1 << ESID_BITS_1T) - 1)

/*
 * Certain configs support MAX_PHYSMEM of more than 512TB. Hence we will
 * need to use more than one context for linearly mapping the kernel.
 * For vmalloc and memmap, we use just one context with 512TB. With a 64 byte
 * struct page size, we need only 32TB in memmap for 2PB (51 bits (MAX_PHYSMEM_BITS)).
 */
#if (H_MAX_PHYSMEM_BITS > MAX_EA_BITS_PER_CONTEXT)
#define MAX_KERNEL_CTX_CNT	(1UL << (H_MAX_PHYSMEM_BITS - MAX_EA_BITS_PER_CONTEXT))
#else
#define MAX_KERNEL_CTX_CNT	1
#endif

#define MAX_VMALLOC_CTX_CNT	1
#define MAX_IO_CTX_CNT		1
#define MAX_VMEMMAP_CTX_CNT	1

/*
 * 256MB segment
 * The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
 * available for user + kernel mapping. VSID 0 is reserved as invalid, contexts
 * 1-4 are used for kernel mapping. Each segment contains 2^28 bytes. Each
 * context maps 2^49 bytes (512TB).
 *
 * We also need to avoid the last segment of the last context, because that
 * would give a proto-VSID of 0x1fffffffff. That will result in a VSID 0
 * because of the modulo operation in the vsid scramble.
 */
#define MAX_USER_CONTEXT	((ASM_CONST(1) << CONTEXT_BITS) - 2)

// The + 2 accounts for INVALID_REGION and 1 more to avoid overlap with kernel
#define MIN_USER_CONTEXT	(MAX_KERNEL_CTX_CNT + MAX_VMALLOC_CTX_CNT + \
				 MAX_IO_CTX_CNT + MAX_VMEMMAP_CTX_CNT + 2)

/*
 * For platforms that support only a 65-bit VA, we limit the context bits.
 */
#define MAX_USER_CONTEXT_65BIT_VA ((ASM_CONST(1) << (65 - (SID_SHIFT + ESID_BITS))) - 2)

/*
 * This should be computed such that protovsid * vsid_multiplier
 * doesn't overflow 64 bits. The vsid_multiplier should also be
 * co-prime to vsid_modulus. We also need to make sure that the number
 * of bits in the multiplied result (dividend) is less than twice the
 * number of protovsid bits for our modulus optimization to work.
 *
 * The below table shows the current values used.
 * |-------+------------+----------------------+------------+--------------------|
 * |       | Prime Bits | proto VSID_BITS_65VA | Total Bits | 2* proto VSID_BITS |
 * |-------+------------+----------------------+------------+--------------------|
 * | 1T    |         24 |                   25 |         49 |                 50 |
 * |-------+------------+----------------------+------------+--------------------|
 * | 256MB |         24 |                   37 |         61 |                 74 |
 * |-------+------------+----------------------+------------+--------------------|
 *
 * |-------+------------+----------------------+------------+--------------------|
 * |       | Prime Bits | proto VSID_BITS_68VA | Total Bits | 2* proto VSID_BITS |
 * |-------+------------+----------------------+------------+--------------------|
 * | 1T    |         24 |                   28 |         52 |                 56 |
 * |-------+------------+----------------------+------------+--------------------|
 * | 256MB |         24 |                   40 |         64 |                 80 |
 * |-------+------------+----------------------+------------+--------------------|
 *
 */
#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_256M		(VA_BITS - SID_SHIFT)
#define VSID_BITS_65_256M	(65 - SID_SHIFT)
/*
 * Modular multiplicative inverse of VSID_MULTIPLIER under modulo VSID_MODULUS
 */
#define VSID_MULINV_256M	ASM_CONST(665548017062)

#define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
#define VSID_BITS_1T		(VA_BITS - SID_SHIFT_1T)
#define VSID_BITS_65_1T		(65 - SID_SHIFT_1T)
#define VSID_MULINV_1T		ASM_CONST(209034062)

/* 1TB VSID reserved for VRMA */
#define VRMA_VSID	0x1ffffffUL
#define USER_VSID_RANGE	(1UL << (ESID_BITS + SID_SHIFT))

/* 4 bits per slice and we have one slice per 1TB */
#define SLICE_ARRAY_SIZE	(H_PGTABLE_RANGE >> 41)
#define LOW_SLICE_ARRAY_SZ	(BITS_PER_LONG / BITS_PER_BYTE)
#define TASK_SLICE_ARRAY_SZ(x)	((x)->hash_context->slb_addr_limit >> 41)
#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_SUBPAGE_PROT
/*
 * For the sub-page protection option, we extend the PGD with one of
 * these. Basically we have a 3-level tree, with the top level being
 * the protptrs array. To optimize speed and memory consumption when
 * only addresses < 4GB are being protected, pointers to the first
 * four pages of sub-page protection words are stored in the low_prot
 * array.
 * Each page of sub-page protection words protects 1GB (4 bytes
 * protects 64k). For the 3-level tree, each page of pointers then
 * protects 8TB.
 */
struct subpage_prot_table {
	unsigned long maxaddr;	/* only addresses < this are protected */
	unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
	unsigned int *low_prot[4];
};

#define SBP_L1_BITS		(PAGE_SHIFT - 2)
#define SBP_L2_BITS		(PAGE_SHIFT - 3)
#define SBP_L1_COUNT		(1 << SBP_L1_BITS)
#define SBP_L2_COUNT		(1 << SBP_L2_BITS)
#define SBP_L2_SHIFT		(PAGE_SHIFT + SBP_L1_BITS)
#define SBP_L3_SHIFT		(SBP_L2_SHIFT + SBP_L2_BITS)
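
/*
 * Lookup sketch (illustration only, mirroring subpage_protection() in
 * the hash MMU code): walking the 3-level tree for an effective
 * address ea below spt->maxaddr:
 *
 *	u32 **sbpm, *sbpp, spp;
 *
 *	sbpm = (ea < 0x100000000UL) ? spt->low_prot :
 *		spt->protptrs[ea >> SBP_L3_SHIFT];
 *	sbpp = sbpm ? sbpm[(ea >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)] : NULL;
 *	spp  = sbpp ? sbpp[(ea >> PAGE_SHIFT) & (SBP_L1_COUNT - 1)] : 0;
 */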

extern void subpage_prot_free(struct mm_struct *mm);
#else
static inline void subpage_prot_free(struct mm_struct *mm) {}
#endif /* CONFIG_PPC_SUBPAGE_PROT */

/*
 * One bit per slice. We have lower slices which cover 256MB segments
 * up to the 4G range. That gets us 16 low slices. For the rest we track
 * slices in 1TB size.
 */
struct slice_mask {
	u64 low_slices;
	DECLARE_BITMAP(high_slices, SLICE_NUM_HIGH);
};

struct hash_mm_context {
	u16 user_psize; /* page size index */

	/* SLB page size encodings */
	unsigned char low_slices_psize[LOW_SLICE_ARRAY_SZ];
	unsigned char high_slices_psize[SLICE_ARRAY_SIZE];
	unsigned long slb_addr_limit;
#ifdef CONFIG_PPC_64K_PAGES
	struct slice_mask mask_64k;
#endif
	struct slice_mask mask_4k;
#ifdef CONFIG_HUGETLB_PAGE
	struct slice_mask mask_16m;
	struct slice_mask mask_16g;
#endif

#ifdef CONFIG_PPC_SUBPAGE_PROT
	struct subpage_prot_table *spt;
#endif /* CONFIG_PPC_SUBPAGE_PROT */
};

#if 0
/*
 * The code below is equivalent to this function for arguments
 * < 2^VSID_BITS, which is all this should ever be called
 * with. However gcc is not clever enough to compute the
 * modulus (2^n-1) without a second multiply.
 */
#define vsid_scramble(protovsid, size) \
	((((protovsid) * VSID_MULTIPLIER_##size) % VSID_MODULUS_##size))

/* simplified form avoiding mod operation */
#define vsid_scramble(protovsid, size) \
	({								 \
		unsigned long x;					 \
		x = (protovsid) * VSID_MULTIPLIER_##size;		 \
		x = (x >> VSID_BITS_##size) + (x & VSID_MODULUS_##size); \
		(x + ((x+1) >> VSID_BITS_##size)) & VSID_MODULUS_##size; \
	})

#else /* 1 */
static inline unsigned long vsid_scramble(unsigned long protovsid,
				  unsigned long vsid_multiplier, int vsid_bits)
{
	unsigned long vsid;
	unsigned long vsid_modulus = ((1UL << vsid_bits) - 1);
	/*
	 * We have the same multiplier for both 256M and 1T segments now
	 */
	vsid = protovsid * vsid_multiplier;
	vsid = (vsid >> vsid_bits) + (vsid & vsid_modulus);
	return (vsid + ((vsid + 1) >> vsid_bits)) & vsid_modulus;
}

#endif /* 1 */

/* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr)
{
	/* Use 1T segments if possible for addresses >= 1T */
	if (addr >= (1UL << SID_SHIFT_1T))
		return mmu_highuser_ssize;
	return MMU_SEGSIZE_256M;
}

static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
				     int ssize)
{
	unsigned long va_bits = VA_BITS;
	unsigned long vsid_bits;
	unsigned long protovsid;

	/*
	 * Bad address. We return VSID 0 for that
	 */
	if ((ea & EA_MASK) >= H_PGTABLE_RANGE)
		return 0;

	if (!mmu_has_feature(MMU_FTR_68_BIT_VA))
		va_bits = 65;

	if (ssize == MMU_SEGSIZE_256M) {
		vsid_bits = va_bits - SID_SHIFT;
		protovsid = (context << ESID_BITS) |
			((ea >> SID_SHIFT) & ESID_BITS_MASK);
		return vsid_scramble(protovsid, VSID_MULTIPLIER_256M, vsid_bits);
	}
	/* 1T segment */
	vsid_bits = va_bits - SID_SHIFT_1T;
	protovsid = (context << ESID_BITS_1T) |
		((ea >> SID_SHIFT_1T) & ESID_BITS_1T_MASK);
	return vsid_scramble(protovsid, VSID_MULTIPLIER_1T, vsid_bits);
}

/*
 * For kernel space, we use context ids as
 * below. Range is 512TB per context.
 *
 * 0x00001 -  [ 0xc000000000000000 - 0xc001ffffffffffff]
 * 0x00002 -  [ 0xc002000000000000 - 0xc003ffffffffffff]
 * 0x00003 -  [ 0xc004000000000000 - 0xc005ffffffffffff]
 * 0x00004 -  [ 0xc006000000000000 - 0xc007ffffffffffff]
 *
 * vmap, IO, vmemmap
 *
 * 0x00005 -  [ 0xc008000000000000 - 0xc009ffffffffffff]
 * 0x00006 -  [ 0xc00a000000000000 - 0xc00bffffffffffff]
 * 0x00007 -  [ 0xc00c000000000000 - 0xc00dffffffffffff]
 *
 */
static inline unsigned long get_kernel_context(unsigned long ea)
{
	unsigned long region_id = get_region_id(ea);
	unsigned long ctx;
	/*
	 * Depending on Kernel config, kernel region can have one context
	 * or more.
	 */
	if (region_id == LINEAR_MAP_REGION_ID) {
		/*
		 * We already verified ea to be not beyond the addr limit.
		 */
		ctx = 1 + ((ea & EA_MASK) >> MAX_EA_BITS_PER_CONTEXT);
	} else
		ctx = region_id + MAX_KERNEL_CTX_CNT - 1;
	return ctx;
}

/*
 * This is only valid for addresses >= PAGE_OFFSET
 */
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
	unsigned long context;

	if (!is_kernel_addr(ea))
		return 0;

	context = get_kernel_context(ea);
	return get_vsid(context, ea, ssize);
}
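
/*
 * Usage sketch (illustrative only): the VSID backing a bolted kernel
 * SLB entry for the start of the linear mapping can be obtained as:
 *
 *	vsid = get_kernel_vsid(PAGE_OFFSET, mmu_kernel_ssize);
 */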

unsigned htab_shift_for_mem_size(unsigned long mem_size);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_MMU_HASH_H_ */