xref: /OK3568_Linux_fs/kernel/arch/um/include/asm/pgtable.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
4*4882a593Smuzhiyun  * Copyright 2003 PathScale, Inc.
5*4882a593Smuzhiyun  * Derived from include/asm-i386/pgtable.h
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #ifndef __UM_PGTABLE_H
9*4882a593Smuzhiyun #define __UM_PGTABLE_H
10*4882a593Smuzhiyun 
11*4882a593Smuzhiyun #include <asm/fixmap.h>
12*4882a593Smuzhiyun 
/*
 * PTE bit layout.  _PAGE_NEWPAGE and _PAGE_NEWPROT are UML-specific
 * software flags: they mark entries whose mapping or protection has
 * changed and still needs to be propagated to the host address space
 * (see set_pte() and pte_mkuptodate() below).
 */
#define _PAGE_PRESENT	0x001
#define _PAGE_NEWPAGE	0x002
#define _PAGE_NEWPROT	0x004
#define _PAGE_RW	0x020
#define _PAGE_USER	0x040
#define _PAGE_ACCESSED	0x080
#define _PAGE_DIRTY	0x100
/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_PROTNONE	0x010	/* if the user mapped it with PROT_NONE;
				   pte_present gives true */
24*4882a593Smuzhiyun #ifdef CONFIG_3_LEVEL_PGTABLES
25*4882a593Smuzhiyun #include <asm/pgtable-3level.h>
26*4882a593Smuzhiyun #else
27*4882a593Smuzhiyun #include <asm/pgtable-2level.h>
28*4882a593Smuzhiyun #endif
29*4882a593Smuzhiyun 
/* The kernel's master top-level page directory. */
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

/* zero page used for uninitialized stuff */
extern unsigned long *empty_zero_page;
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun /* Just any arbitrary offset to the start of the vmalloc VM area: the
36*4882a593Smuzhiyun  * current 8MB value just means that there will be a 8MB "hole" after the
37*4882a593Smuzhiyun  * physical memory until the kernel virtual memory starts.  That means that
38*4882a593Smuzhiyun  * any out-of-bounds memory accesses will hopefully be caught.
39*4882a593Smuzhiyun  * The vmalloc() routines leaves a hole of 4kB between each vmalloced
40*4882a593Smuzhiyun  * area for the same reason. ;)
41*4882a593Smuzhiyun  */
42*4882a593Smuzhiyun 
extern unsigned long end_iomem;

/* vmalloc area sits between the end of iomem and the fixmap region. */
#define VMALLOC_OFFSET	(__va_space)
#define VMALLOC_START ((end_iomem + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define PKMAP_BASE ((FIXADDR_START - LAST_PKMAP * PAGE_SIZE) & PMD_MASK)
#define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
/* Modules are loaded into the vmalloc area. */
#define MODULES_VADDR	VMALLOC_START
#define MODULES_END	VMALLOC_END
/*
 * Fix: the size of the modules area is end - start.  The original
 * (MODULES_VADDR - MODULES_END) yields a negative value, since
 * MODULES_VADDR == VMALLOC_START lies below MODULES_END == VMALLOC_END
 * (compare x86, which defines MODULES_LEN as END - VADDR).
 */
#define MODULES_LEN	(MODULES_END - MODULES_VADDR)
52*4882a593Smuzhiyun 
/* Attribute sets for page-table pages (user and kernel variants). */
#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
/* Bits preserved across pte_modify(): page frame plus accessed/dirty. */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define __PAGE_KERNEL_EXEC                                              \
	 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
/* Protection values used by the __P/__S tables and kernel mappings. */
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
64*4882a593Smuzhiyun 
/*
 * The i386 can't do page protection for execute, and considers that
 * the same as read.  Also, write permissions imply read permissions.
 * This is the closest we can get..
 */
/* Private (copy-on-write) mapping protections, indexed by xwr bits. */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

/* Shared mapping protections, indexed by xwr bits. */
#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
88*4882a593Smuzhiyun 
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr) virt_to_page(empty_zero_page)

/* Clear a pte; _PAGE_NEWPAGE marks it as needing a host-side unmap. */
#define pte_clear(mm,addr,xp) pte_set_val(*(xp), (phys_t) 0, __pgprot(_PAGE_NEWPAGE))

/* A pmd is empty when no bits other than _PAGE_NEWPAGE are set. */
#define pmd_none(x)	(!((unsigned long)pmd_val(x) & ~_PAGE_NEWPAGE))
#define	pmd_bad(x)	((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)

#define pmd_present(x)	(pmd_val(x) & _PAGE_PRESENT)
#define pmd_clear(xp)	do { pmd_val(*(xp)) = _PAGE_NEWPAGE; } while (0)

/* _PAGE_NEWPAGE bookkeeping for the higher page-table levels too. */
#define pmd_newpage(x)  (pmd_val(x) & _PAGE_NEWPAGE)
#define pmd_mkuptodate(x) (pmd_val(x) &= ~_PAGE_NEWPAGE)

#define pud_newpage(x)  (pud_val(x) & _PAGE_NEWPAGE)
#define pud_mkuptodate(x) (pud_val(x) &= ~_PAGE_NEWPAGE)

#define p4d_newpage(x)  (p4d_val(x) & _PAGE_NEWPAGE)
#define p4d_mkuptodate(x) (p4d_val(x) &= ~_PAGE_NEWPAGE)

#define pmd_page(pmd) phys_to_page(pmd_val(pmd) & PAGE_MASK)

#define pte_page(x) pfn_to_page(pte_pfn(x))

/* PROT_NONE mappings still count as present (see _PAGE_PROTNONE). */
#define pte_present(x)	pte_get_bits(x, (_PAGE_PRESENT | _PAGE_PROTNONE))
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun /*
119*4882a593Smuzhiyun  * =================================
120*4882a593Smuzhiyun  * Flags checking section.
121*4882a593Smuzhiyun  * =================================
122*4882a593Smuzhiyun  */
123*4882a593Smuzhiyun 
/* True when the pte holds nothing at all (pte_is_zero() comes from the
 * 2/3-level header included above). */
static inline int pte_none(pte_t pte)
{
	return pte_is_zero(pte);
}
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun /*
130*4882a593Smuzhiyun  * The following only work if pte_present() is true.
131*4882a593Smuzhiyun  * Undefined behaviour if not..
132*4882a593Smuzhiyun  */
pte_read(pte_t pte)133*4882a593Smuzhiyun static inline int pte_read(pte_t pte)
134*4882a593Smuzhiyun {
135*4882a593Smuzhiyun 	return((pte_get_bits(pte, _PAGE_USER)) &&
136*4882a593Smuzhiyun 	       !(pte_get_bits(pte, _PAGE_PROTNONE)));
137*4882a593Smuzhiyun }
138*4882a593Smuzhiyun 
/* Executable: same test as pte_read() — there is no separate exec bit
 * (see the i386 note above the __P tables). */
static inline int pte_exec(pte_t pte)
{
	if (pte_get_bits(pte, _PAGE_PROTNONE))
		return 0;
	return pte_get_bits(pte, _PAGE_USER) != 0;
}
143*4882a593Smuzhiyun 
/* Writable: RW bit set and not a PROT_NONE mapping. */
static inline int pte_write(pte_t pte)
{
	if (pte_get_bits(pte, _PAGE_PROTNONE))
		return 0;
	return pte_get_bits(pte, _PAGE_RW) != 0;
}
149*4882a593Smuzhiyun 
/* Dirty: the page has been written to. */
static inline int pte_dirty(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_DIRTY);
}
154*4882a593Smuzhiyun 
/* Young: the page has been accessed recently. */
static inline int pte_young(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_ACCESSED);
}
159*4882a593Smuzhiyun 
/* Newpage: the mapping has not yet been installed in the host. */
static inline int pte_newpage(pte_t pte)
{
	return pte_get_bits(pte, _PAGE_NEWPAGE);
}
164*4882a593Smuzhiyun 
/* Newprot: a present page whose protection change has not yet been
 * pushed to the host. */
static inline int pte_newprot(pte_t pte)
{
	if (!pte_present(pte))
		return 0;
	return pte_get_bits(pte, _PAGE_NEWPROT) ? 1 : 0;
}
169*4882a593Smuzhiyun 
170*4882a593Smuzhiyun /*
171*4882a593Smuzhiyun  * =================================
172*4882a593Smuzhiyun  * Flags setting section.
173*4882a593Smuzhiyun  * =================================
174*4882a593Smuzhiyun  */
175*4882a593Smuzhiyun 
pte_mknewprot(pte_t pte)176*4882a593Smuzhiyun static inline pte_t pte_mknewprot(pte_t pte)
177*4882a593Smuzhiyun {
178*4882a593Smuzhiyun 	pte_set_bits(pte, _PAGE_NEWPROT);
179*4882a593Smuzhiyun 	return(pte);
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun 
/* Drop the dirty bit. */
static inline pte_t pte_mkclean(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_DIRTY);
	return pte;
}
187*4882a593Smuzhiyun 
/* Drop the accessed bit. */
static inline pte_t pte_mkold(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_ACCESSED);
	return pte;
}
193*4882a593Smuzhiyun 
/* Remove write permission; mark the protection change for the host
 * only when the bit was actually set. */
static inline pte_t pte_wrprotect(pte_t pte)
{
	if (!pte_get_bits(pte, _PAGE_RW))
		return pte;
	pte_clear_bits(pte, _PAGE_RW);
	return pte_mknewprot(pte);
}
202*4882a593Smuzhiyun 
/* Grant user (read) access; flag the change for the host if the bit
 * was not already set. */
static inline pte_t pte_mkread(pte_t pte)
{
	if (!pte_get_bits(pte, _PAGE_USER)) {
		pte_set_bits(pte, _PAGE_USER);
		pte = pte_mknewprot(pte);
	}
	return pte;
}
210*4882a593Smuzhiyun 
/* Set the dirty bit. */
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_set_bits(pte, _PAGE_DIRTY);
	return pte;
}
216*4882a593Smuzhiyun 
/* Set the accessed bit. */
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_set_bits(pte, _PAGE_ACCESSED);
	return pte;
}
222*4882a593Smuzhiyun 
/* Grant write access; flag the change for the host if the bit was not
 * already set. */
static inline pte_t pte_mkwrite(pte_t pte)
{
	if (!pte_get_bits(pte, _PAGE_RW)) {
		pte_set_bits(pte, _PAGE_RW);
		pte = pte_mknewprot(pte);
	}
	return pte;
}
230*4882a593Smuzhiyun 
/* The host now matches this pte: clear the pending-sync flags.
 * _PAGE_NEWPROT is only meaningful for present (mapped) pages. */
static inline pte_t pte_mkuptodate(pte_t pte)
{
	pte_clear_bits(pte, _PAGE_NEWPAGE);
	if (pte_present(pte))
		pte_clear_bits(pte, _PAGE_NEWPROT);
	return pte;
}
238*4882a593Smuzhiyun 
/* Mark the pte as a new mapping that must be installed in the host. */
static inline pte_t pte_mknewpage(pte_t pte)
{
	pte_set_bits(pte, _PAGE_NEWPAGE);
	return pte;
}
244*4882a593Smuzhiyun 
/* Install @pteval at @pteptr, flagging it for host synchronization. */
static inline void set_pte(pte_t *pteptr, pte_t pteval)
{
	pte_t marked;

	pte_copy(*pteptr, pteval);

	/*
	 * If it's a swap entry, it needs to be marked _PAGE_NEWPAGE so
	 * fix_range knows to unmap it.  _PAGE_NEWPROT is specific to
	 * mapped pages.
	 */
	marked = pte_mknewpage(*pteptr);
	if (pte_present(marked))
		marked = pte_mknewprot(marked);
	*pteptr = marked;
}
257*4882a593Smuzhiyun 
/* Generic-MM hook: on UML this is just set_pte(); mm and addr are
 * unused. */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *pteptr, pte_t pteval)
{
	set_pte(pteptr, pteval);
}
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun #define __HAVE_ARCH_PTE_SAME
pte_same(pte_t pte_a,pte_t pte_b)265*4882a593Smuzhiyun static inline int pte_same(pte_t pte_a, pte_t pte_b)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun 	return !((pte_val(pte_a) ^ pte_val(pte_b)) & ~_PAGE_NEWPAGE);
268*4882a593Smuzhiyun }
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun /*
271*4882a593Smuzhiyun  * Conversion functions: convert a page and protection to a page entry,
272*4882a593Smuzhiyun  * and a page entry and page directory to the page they refer to.
273*4882a593Smuzhiyun  */
274*4882a593Smuzhiyun 
/* page <-> physical-address conversion helpers. */
#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
#define __virt_to_page(virt) phys_to_page(__pa(virt))
#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
#define virt_to_page(addr) __virt_to_page((const unsigned long) addr)

/*
 * Build a pte mapping @page with protection @pgprot.  Present ptes get
 * _PAGE_NEWPAGE | _PAGE_NEWPROT so the tlb code knows they must be
 * synced to the host.
 *
 * Fix: pte_mknewpage()/pte_mknewprot() take the pte by value and return
 * the modified copy; the original code discarded that result, so the
 * flags were never actually set here.  Assign it back.
 */
#define mk_pte(page, pgprot) \
	({ pte_t pte;					\
							\
	pte_set_val(pte, page_to_phys(page), (pgprot));	\
	if (pte_present(pte))				\
		pte = pte_mknewprot(pte_mknewpage(pte));	\
	pte;})
287*4882a593Smuzhiyun 
/* Change the protection of @pte to @newprot, keeping the page frame and
 * the accessed/dirty bits (_PAGE_CHG_MASK). */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_set_val(pte, (pte_val(pte) & _PAGE_CHG_MASK), newprot);
	return pte;
}
293*4882a593Smuzhiyun 
/*
 * the pmd page can be thought of an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This macro returns the kernel virtual address of the page table page
 * that the given pmd entry points to.  (The original comment claimed it
 * returned an index — that describes pmd_index, not this macro.)
 */
#define pmd_page_vaddr(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))

struct mm_struct;
/* Look up the pte mapping @addr in @mm's page tables. */
extern pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr);

/* No-op on UML: there is no hardware TLB to prime here. */
#define update_mmu_cache(vma,address,ptep) do ; while (0)
306*4882a593Smuzhiyun 
/*
 * Encode and de-code a swap entry.  The swap type occupies bits 5..9
 * (5 bits, mask 0x1f) and the offset starts at bit 11, leaving the low
 * software bits (_PAGE_NEWPAGE etc.) clear.
 */
#define __swp_type(x)			(((x).val >> 5) & 0x1f)
#define __swp_offset(x)			((x).val >> 11)

#define __swp_entry(type, offset) \
	((swp_entry_t) { ((type) << 5) | ((offset) << 11) })
/* pte_mkuptodate() strips the sync flags before the value is reused
 * as a swap entry. */
#define __pte_to_swp_entry(pte) \
	((swp_entry_t) { pte_val(pte_mkuptodate(pte)) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

/* All kernel virtual addresses are considered valid. */
#define kern_addr_valid(addr) (1)

/* Clear a kernel PTE and flush it from the TLB */
#define kpte_clear_flush(ptep, vaddr)		\
do {						\
	pte_clear(&init_mm, (vaddr), (ptep));	\
	__flush_tlb_one((vaddr));		\
} while (0)
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun #endif
327