/*
 * Copyright (C) 1994 - 2002 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2002 Maciej W. Rozycki
 *
 * SPDX-License-Identifier: GPL-2.0
 */
#ifndef _ASM_PGTABLE_BITS_H
#define _ASM_PGTABLE_BITS_H

/*
 * Note that we shift the lower 32 bits of each EntryLo[01] entry
 * 6 bits to the left. That way we can convert the PFN into the
 * physical address by a single 'and' operation and gain 6 additional
 * bits for storing information which isn't present in a normal
 * MIPS page table.
 *
 * Similar to the Alpha port, we need to keep track of the ref
 * and mod bits in software. We have a software "yeah you can read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page. By the same token we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page; this gives us a mod bit via the hardware
 * dirty bit.
 *
 * Certain revisions of the R4000 and R5000 have a bug where if a
 * certain sequence occurs in the last 3 instructions of an executable
 * page, and the following page is not mapped, the CPU can do
 * unpredictable things. The code (when it is written) to deal with
 * this problem will be in the update_mmu_cache() code for the r4k.
 */
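/*
 * Illustrative sketch only (not kernel API): with the layouts defined
 * below, the physical address backing a PTE value is recovered as
 *
 *	paddr = (pte_val >> _PFN_SHIFT) << PAGE_SHIFT;
 *
 * while everything below _PFN_SHIFT carries the hardware and software
 * flags described in this file.
 */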
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/*
 * The following bits are implemented by the TLB hardware
 */
#define _PAGE_NO_EXEC_SHIFT	0
#define _PAGE_NO_EXEC		(1 << _PAGE_NO_EXEC_SHIFT)
#define _PAGE_NO_READ_SHIFT	(_PAGE_NO_EXEC_SHIFT + 1)
#define _PAGE_NO_READ		(1 << _PAGE_NO_READ_SHIFT)
#define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_SHIFT		(_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_MASK		(7 << _CACHE_SHIFT)

/*
 * The following bits are implemented in software
 */
#define _PAGE_PRESENT_SHIFT	(24)
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)

#define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

/*
 * Bits for extended EntryLo0/EntryLo1 registers
 */
#define _PFNX_MASK		0xffffff

#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/*
 * The following bits are implemented in software
 */
#define _PAGE_PRESENT_SHIFT	(0)
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)

/*
 * The following bits are implemented by the TLB hardware
 */
#define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 4)
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_UNCACHED_SHIFT	(_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_UNCACHED		(1 << _CACHE_UNCACHED_SHIFT)
#define _CACHE_MASK		_CACHE_UNCACHED

#define _PFN_SHIFT		PAGE_SHIFT

#else
/*
 * Below are the "Normal" R4K cases
 */

/*
 * The following bits are implemented in software
 */
#define _PAGE_PRESENT_SHIFT	0
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
/* R2 or later cores check for RI/XI support to determine _PAGE_READ */
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
#define _PAGE_WRITE_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#else
#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#endif
#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)

#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
/* Huge TLB page */
#define _PAGE_HUGE_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
#define _PAGE_SPLITTING_SHIFT	(_PAGE_HUGE_SHIFT + 1)
#define _PAGE_SPLITTING		(1 << _PAGE_SPLITTING_SHIFT)
#endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */

#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
/* XI - page cannot be executed */
#ifdef _PAGE_SPLITTING_SHIFT
#define _PAGE_NO_EXEC_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
#else
#define _PAGE_NO_EXEC_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
#endif
#define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)

/* RI - page cannot be read */
#define _PAGE_READ_SHIFT	(_PAGE_NO_EXEC_SHIFT + 1)
#define _PAGE_READ		(cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
#define _PAGE_NO_READ_SHIFT	_PAGE_READ_SHIFT
#define _PAGE_NO_READ		(cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
#endif /* defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) */

#if defined(_PAGE_NO_READ_SHIFT)
#define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
#elif defined(_PAGE_SPLITTING_SHIFT)
#define _PAGE_GLOBAL_SHIFT	(_PAGE_SPLITTING_SHIFT + 1)
#else
#define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
#endif
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)

#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_SHIFT		(_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_MASK		(7 << _CACHE_SHIFT)

#define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC		0
#endif
#ifndef _PAGE_NO_READ
#define _PAGE_NO_READ		0
#endif

#define _PAGE_SILENT_READ	_PAGE_VALID
#define _PAGE_SILENT_WRITE	_PAGE_DIRTY

#define _PFN_MASK		(~((1 << (_PFN_SHIFT)) - 1))

/*
 * The final layouts of the PTE bits are:
 *
 *   64-bit, R1 or earlier:     CCC D V G [S H] M A W R P
 *   32-bit, R1 or earlier:     CCC D V G M A W R P
 *   64-bit, R2 or later:       CCC D V G RI/R XI [S H] M A W P
 *   32-bit, R2 or later:       CCC D V G RI/R XI M A W P
 */
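/*
 * As a worked example of the shift chain above: in the 32-bit, pre-R2
 * case without huge-page support, the software bits land at P=0, R=1,
 * W=2, A=3, M=4, the hardware bits at G=5, V=6, D=7, and the cache
 * attribute occupies bits 8-10, so _PFN_SHIFT evaluates to 11 for
 * 4 KiB pages.
 */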


#ifndef __ASSEMBLY__
/*
 * pte_to_entrylo converts a page table entry (PTE) into a MIPS
 * EntryLo0/1 value.
 */
static inline uint64_t pte_to_entrylo(unsigned long pte_val)
{
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	if (cpu_has_rixi) {
		int sa;
#ifdef CONFIG_32BIT
		sa = 31 - _PAGE_NO_READ_SHIFT;
#else
		sa = 63 - _PAGE_NO_READ_SHIFT;
#endif
		/*
		 * C has no way to express that this is a DSRL
		 * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2. Luckily
		 * in the fast path this is done in assembly
		 */
		return (pte_val >> _PAGE_GLOBAL_SHIFT) |
			((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
	}
#endif

	return pte_val >> _PAGE_GLOBAL_SHIFT;
}
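
/*
 * Usage sketch (illustrative only; the hot TLB refill path is generated
 * assembly, see tlbex.c): each MIPS TLB entry maps an even/odd pair of
 * virtual pages, so the two EntryLo values for one entry come from two
 * adjacent PTEs.
 */
static inline void ptes_to_entrylo_pair_sketch(unsigned long even_pte,
					       unsigned long odd_pte,
					       uint64_t *entrylo0,
					       uint64_t *entrylo1)
{
	*entrylo0 = pte_to_entrylo(even_pte);
	*entrylo1 = pte_to_entrylo(odd_pte);
}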
#endif

/*
 * Cache attributes
 */
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define _CACHE_CACHABLE_NONCOHERENT 0
#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED

#elif defined(CONFIG_CPU_SB1)

/* No penalty for being coherent on the SB1, so just
   use it for "noncoherent" spaces, too. Shouldn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)

#elif defined(CONFIG_CPU_LOONGSON3)

/* Using COHERENT flag for NONCOHERENT doesn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* LOONGSON */
#define _CACHE_CACHABLE_COHERENT    (3<<_CACHE_SHIFT)  /* LOONGSON-3 */

#elif defined(CONFIG_MACH_INGENIC)

/* Ingenic uses the WA bit to achieve write-combine memory writes */
#define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT)

#endif

#ifndef _CACHE_CACHABLE_NO_WA
#define _CACHE_CACHABLE_NO_WA		(0<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_WA
#define _CACHE_CACHABLE_WA		(1<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED
#define _CACHE_UNCACHED			(2<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_NONCOHERENT
#define _CACHE_CACHABLE_NONCOHERENT	(3<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CE
#define _CACHE_CACHABLE_CE		(4<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_COW
#define _CACHE_CACHABLE_COW		(5<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CUW
#define _CACHE_CACHABLE_CUW		(6<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED_ACCELERATED
#define _CACHE_UNCACHED_ACCELERATED	(7<<_CACHE_SHIFT)
#endif

#define __READABLE	(_PAGE_SILENT_READ | _PAGE_READ | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)

#define _PAGE_CHG_MASK	(_PAGE_ACCESSED | _PAGE_MODIFIED | \
			 _PFN_MASK | _CACHE_MASK)
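
/*
 * A rough sketch of how these bits are consumed (illustrative only; the
 * real protection values live in asm/pgtable.h and use the boot-time
 * cache default): a present, readable, writable, cached mapping is
 * composed along the lines of
 *
 *	_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _CACHE_CACHABLE_NONCOHERENT
 */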

#endif /* _ASM_PGTABLE_BITS_H */