/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003 Ralf Baechle
 */
#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

#include <linux/mm_types.h>
#include <linux/mmzone.h>
#ifdef CONFIG_32BIT
#include <asm/pgtable-32.h>
#endif
#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#endif

#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/pgtable-bits.h>
#include <asm/cpu-features.h>

struct mm_struct;
struct vm_area_struct;

#define PAGE_NONE		__pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
					 _page_cachable_default)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
					 _page_cachable_default)
#define PAGE_COPY		__pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
					 _page_cachable_default)
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | \
					 _page_cachable_default)
#define PAGE_KERNEL		__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
					 _PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC		__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
					 _PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
#define PAGE_KERNEL_UNCACHED	__pgprot(_PAGE_PRESENT | __READABLE | \
					 __WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)

/*
 * If _PAGE_NO_EXEC is not defined, we can't do page protection for
 * execute, and consider it to be the same as read.  Also, write
 * permissions imply read permissions.  This is the closest we can get
 * by reasonable means.
 */

/*
 * Dummy values to fill the table in mmap.c.
 * The real values will be generated at runtime.
 */
#define __P000	__pgprot(0)
#define __P001	__pgprot(0)
#define __P010	__pgprot(0)
#define __P011	__pgprot(0)
#define __P100	__pgprot(0)
#define __P101	__pgprot(0)
#define __P110	__pgprot(0)
#define __P111	__pgprot(0)

#define __S000	__pgprot(0)
#define __S001	__pgprot(0)
#define __S010	__pgprot(0)
#define __S011	__pgprot(0)
#define __S100	__pgprot(0)
#define __S101	__pgprot(0)
#define __S110	__pgprot(0)
#define __S111	__pgprot(0)
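
/*
 * A sketch of the runtime setup that replaces these dummies (modelled
 * on setup_protection_map() in arch/mips/mm/cache.c; the exact bit
 * choices depend on whether the CPU implements the RI/XI bits):
 *
 *	protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT |
 *				     _PAGE_NO_EXEC | _PAGE_NO_READ);
 *	protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT |
 *				     _PAGE_NO_EXEC);
 *	...
 */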

extern unsigned long _page_cachable_default;

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
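
/*
 * Note (informational): with D-cache aliasing, the zero area consists of
 * several page-sized copies, and zero_page_mask selects the copy whose
 * cache colour matches vaddr, so reads through the zero page never create
 * a dcache alias.  Without aliasing, zero_page_mask is 0 and a single
 * page is used.
 */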

extern void paging_init(void);

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))

#define __pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
#ifndef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_page(pmd)		__pmd_page(pmd)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE  */

#define pmd_page_vaddr(pmd)	pmd_val(pmd)

#define htw_stop()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!raw_current_cpu_data.htw_seq++) {			\
			write_c0_pwctl(read_c0_pwctl() &		\
				       ~(1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)

#define htw_start()							\
do {									\
	unsigned long flags;						\
									\
	if (cpu_has_htw) {						\
		local_irq_save(flags);					\
		if (!--raw_current_cpu_data.htw_seq) {			\
			write_c0_pwctl(read_c0_pwctl() |		\
				       (1 << MIPS_PWCTL_PWEN_SHIFT));	\
			back_to_back_c0_hazard();			\
		}							\
		local_irq_restore(flags);				\
	}								\
} while (0)
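
/*
 * Illustrative nesting behaviour: htw_stop()/htw_start() reference-count
 * via htw_seq, so only the outermost pair touches the hardware walker:
 *
 *	htw_stop();	// htw_seq 0 -> 1, walker disabled
 *	htw_stop();	// htw_seq 1 -> 2, no hardware access
 *	htw_start();	// htw_seq 2 -> 1, no hardware access
 *	htw_start();	// htw_seq 1 -> 0, walker re-enabled
 */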

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval);

#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

#ifdef CONFIG_XPA
# define pte_none(pte)		(!(((pte).pte_high) & ~_PAGE_GLOBAL))
#else
# define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
#endif

#define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
#define pte_no_exec(pte)	((pte).pte_low & _PAGE_NO_EXEC)

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	/*
	 * Write pte_high first: pte_low carries _PAGE_PRESENT, so the
	 * entry must not look present until the high word is in place.
	 */
	ptep->pte_high = pte.pte_high;
	smp_wmb();
	ptep->pte_low = pte.pte_low;

#ifdef CONFIG_XPA
	if (pte.pte_high & _PAGE_GLOBAL) {
#else
	if (pte.pte_low & _PAGE_GLOBAL) {
#endif
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
		if (pte_none(*buddy)) {
			if (!IS_ENABLED(CONFIG_XPA))
				buddy->pte_low |= _PAGE_GLOBAL;
			buddy->pte_high |= _PAGE_GLOBAL;
		}
	}
}
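
/*
 * Background (informational): MIPS TLB entries map an even/odd pair of
 * virtual pages, so both PTEs of a pair must agree on _PAGE_GLOBAL or
 * the hardware may see an inconsistent entry.  ptep_buddy(), defined in
 * pgtable-32.h/pgtable-64.h, returns the other PTE of the pair,
 * typically by toggling the low pointer bit:
 *
 *	ptep_buddy(ptep) == (pte_t *)((unsigned long)(ptep) ^ sizeof(pte_t))
 */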

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_t null = __pte(0);

	htw_stop();
	/* Preserve global status for the pair */
	if (IS_ENABLED(CONFIG_XPA)) {
		if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
			null.pte_high = _PAGE_GLOBAL;
	} else {
		if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
			null.pte_low = null.pte_high = _PAGE_GLOBAL;
	}

	set_pte_at(mm, addr, ptep, null);
	htw_start();
}
#else

#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_no_exec(pte)	(pte_val(pte) & _PAGE_NO_EXEC)

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
#if !defined(CONFIG_CPU_R3K_TLB)
	if (pte_val(pteval) & _PAGE_GLOBAL) {
		pte_t *buddy = ptep_buddy(ptep);
		/*
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
# if defined(CONFIG_PHYS_ADDR_T_64BIT) && !defined(CONFIG_CPU_MIPS32)
		cmpxchg64(&buddy->pte, 0, _PAGE_GLOBAL);
# else
		cmpxchg(&buddy->pte, 0, _PAGE_GLOBAL);
# endif
	}
#endif
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	htw_stop();
#if !defined(CONFIG_CPU_R3K_TLB)
	/* Preserve global status for the pair */
	if (pte_val(*ptep_buddy(ptep)) & _PAGE_GLOBAL)
		set_pte_at(mm, addr, ptep, __pte(_PAGE_GLOBAL));
	else
#endif
		set_pte_at(mm, addr, ptep, __pte(0));
	htw_start();
}
#endif

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	extern void __update_cache(unsigned long address, pte_t pte);

	if (!pte_present(pteval))
		goto cache_sync_done;

	if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
		goto cache_sync_done;

	__update_cache(addr, pteval);
cache_sync_done:
	set_pte(ptep, pteval);
}
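
/*
 * Note (informational): __update_cache() handles cache coherency for the
 * page about to be mapped; it can be skipped when the new PTE is not
 * present, or when it still maps the same pfn, because no new alias is
 * created in either case.
 */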

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pmd(pmdptr, pmdval) do { *(pmdptr) = (pmdval); } while (0)

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * (puds are folded into pgds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
#define set_pud(pudptr, pudval) do { *(pudptr) = (pudval); } while (0)
#endif

#define PGD_T_LOG2	(__builtin_ffs(sizeof(pgd_t)) - 1)
#define PMD_T_LOG2	(__builtin_ffs(sizeof(pmd_t)) - 1)
#define PTE_T_LOG2	(__builtin_ffs(sizeof(pte_t)) - 1)
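
/*
 * Example: with a 4-byte pte_t, __builtin_ffs(4) == 3, so PTE_T_LOG2 == 2,
 * i.e. log2 of the type size.  The assembler TLB handlers use these as
 * shift amounts when indexing the page-table levels.
 */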

/*
 * We used to declare this array with size but gcc 3.3 and older are not able
 * to find that this expression is a constant, so the size is dropped.
 */
extern pgd_t swapper_pg_dir[];

/*
 * Platform specific pte_special() and pte_mkspecial() definitions
 * are required only when ARCH_HAS_PTE_SPECIAL is enabled.
 */
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_special(pte_t pte)
{
	return pte.pte_low & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte.pte_low |= _PAGE_SPECIAL;
	return pte;
}
#else
static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#endif
#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_WRITE;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_MODIFIED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_WRITE;
	pte.pte_high &= ~_PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte.pte_low  &= ~_PAGE_ACCESSED;
	if (!IS_ENABLED(CONFIG_XPA))
		pte.pte_low &= ~_PAGE_SILENT_READ;
	pte.pte_high &= ~_PAGE_SILENT_READ;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte.pte_low |= _PAGE_WRITE;
	if (pte.pte_low & _PAGE_MODIFIED) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte.pte_low |= _PAGE_MODIFIED;
	if (pte.pte_low & _PAGE_WRITE) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_WRITE;
		pte.pte_high |= _PAGE_SILENT_WRITE;
	}
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte.pte_low |= _PAGE_ACCESSED;
	if (!(pte.pte_low & _PAGE_NO_READ)) {
		if (!IS_ENABLED(CONFIG_XPA))
			pte.pte_low |= _PAGE_SILENT_READ;
		pte.pte_high |= _PAGE_SILENT_READ;
	}
	return pte;
}
#else
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_MODIFIED; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_MODIFIED)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) |= _PAGE_SILENT_WRITE;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	if (!(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
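
/*
 * Note (informational): _PAGE_ACCESSED/_PAGE_MODIFIED are software-managed
 * reference/dirty bits, while _PAGE_SILENT_READ/_PAGE_SILENT_WRITE mirror
 * them into the hardware valid (V) and dirty (D) TLB bits.  A page only
 * becomes hardware-writable once it is both writable and dirty, e.g.:
 *
 *	pte = pte_mkwrite(pte_mkdirty(pte));	// also sets _PAGE_SILENT_WRITE
 */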

#define pte_sw_mkyoung	pte_mkyoung

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
static inline int pte_huge(pte_t pte)	{ return pte_val(pte) & _PAGE_HUGE; }

static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_HUGE;
	return pte;
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline bool pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SOFT_DIRTY;
	return pte;
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_SOFT_DIRTY);
	return pte;
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

#endif

/*
 * Macro to mark a page protection value as "uncacheable".  Note
 * that "protection" is really a misnomer here as the protection value
 * contains the memory attribute bits, dirty bits, and various other
 * bits as well.
 */
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;

	return __pgprot(prot);
}

#define pgprot_writecombine pgprot_writecombine

static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	/* cpu_data[0].writecombine is already shifted by _CACHE_SHIFT */
	prot = (prot & ~_CACHE_MASK) | cpu_data[0].writecombine;

	return __pgprot(prot);
}
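
/*
 * Typical (illustrative) use from a driver's mmap() method, mapping
 * device registers uncached into userspace:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */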

static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
						unsigned long address)
{
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache() will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot) & ~_PFNX_MASK;
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte.pte_low  &= _PAGE_CHG_MASK;
	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
	pte.pte_low  |= pgprot_val(newprot);
	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
	return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_CHG_MASK;
	/* Re-enable the hardware read (valid) bit for a young, readable page */
	if ((pte_val(pte) & _PAGE_ACCESSED) && !(pte_val(pte) & _PAGE_NO_READ))
		pte_val(pte) |= _PAGE_SILENT_READ;
	return pte;
}
#endif


extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
	pte_t pte);

static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_tlb(vma, address, pte);
}

#define __HAVE_ARCH_UPDATE_MMU_TLB
#define update_mmu_tlb	update_mmu_cache

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
	unsigned long address, pmd_t *pmdp)
{
	pte_t pte = *(pte_t *)pmdp;

	__update_tlb(vma, address, pte);
}

#define kern_addr_valid(addr)	(1)

/*
 * Allow physical addresses to be fixed up to help 36-bit peripherals.
 */
#ifdef CONFIG_MIPS_FIXUP_BIGPHYS_ADDR
phys_addr_t fixup_bigphys_addr(phys_addr_t addr, phys_addr_t size);
int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long vaddr,
		unsigned long pfn, unsigned long size, pgprot_t prot);
#define io_remap_pfn_range io_remap_pfn_range
#else
#define fixup_bigphys_addr(addr, size)	(addr)
#endif /* CONFIG_MIPS_FIXUP_BIGPHYS_ADDR */
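
/*
 * A sketch of what an affected platform's io_remap_pfn_range() does
 * (modelled on the Alchemy implementation; details vary by board):
 *
 *	phys_addr_t phys = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
 *	return remap_pfn_range(vma, vaddr, phys >> PAGE_SHIFT, size, prot);
 */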

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/* We don't have hardware dirty/accessed bits, generic_pmdp_establish is fine. */
#define pmdp_establish generic_pmdp_establish

#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);

static inline int pmd_trans_huge(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_HUGE);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_HUGE;

	return pmd;
}

extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		       pmd_t *pmdp, pmd_t pmd);

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_WRITE);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_WRITE | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_WRITE;
	if (pmd_val(pmd) & _PAGE_MODIFIED)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_MODIFIED);
}

static inline pmd_t pmd_mkclean(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_MODIFIED | _PAGE_SOFT_DIRTY;
	if (pmd_val(pmd) & _PAGE_WRITE)
		pmd_val(pmd) |= _PAGE_SILENT_WRITE;

	return pmd;
}

static inline int pmd_young(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);

	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_ACCESSED;

	if (!(pmd_val(pmd) & _PAGE_NO_READ))
		pmd_val(pmd) |= _PAGE_SILENT_READ;

	return pmd;
}

#ifdef CONFIG_HAVE_ARCH_SOFT_DIRTY
static inline int pmd_soft_dirty(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	pmd_val(pmd) |= _PAGE_SOFT_DIRTY;
	return pmd;
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_SOFT_DIRTY);
	return pmd;
}

#endif /* CONFIG_HAVE_ARCH_SOFT_DIRTY */

/* Extern to avoid header file madness */
extern pmd_t mk_pmd(struct page *page, pgprot_t prot);

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> _PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	return pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
		       (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
	return pmd;
}

static inline pmd_t pmd_mkinvalid(pmd_t pmd)
{
	pmd_val(pmd) &= ~(_PAGE_PRESENT | _PAGE_VALID | _PAGE_DIRTY);

	return pmd;
}

/*
 * The generic version of pmdp_huge_get_and_clear uses a version of
 * pmd_clear() with a different prototype.
 */
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t old = *pmdp;

	pmd_clear(pmdp);

	return old;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef _PAGE_HUGE
#define pmd_leaf(pmd)	((pmd_val(pmd) & _PAGE_HUGE) != 0)
#define pud_leaf(pud)	((pud_val(pud) & _PAGE_HUGE) != 0)
#endif
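
/*
 * Note (informational): pmd_leaf()/pud_leaf() tell generic page-table
 * walkers that a huge entry is terminal and maps memory directly, so
 * they must not descend through it to a lower table level.
 */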

#define gup_fast_permitted(start, end)	(!cpu_has_dc_aliases)

/*
 * We provide our own get_unmapped_area() to cope with the virtual aliasing
 * constraints placed on us by the cache architecture.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* _ASM_PGTABLE_H */