/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/pgtable.h
 *
 *  Copyright (C) 1995-2002 Russell King
 */
#ifndef _ASMARM_PGTABLE_H
#define _ASMARM_PGTABLE_H

#include <linux/const.h>
#include <asm/proc-fns.h>

#ifndef __ASSEMBLY__
/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern struct page *empty_zero_page;
#define ZERO_PAGE(vaddr)	(empty_zero_page)
#endif

#ifndef CONFIG_MMU

#include <asm-generic/pgtable-nopud.h>
#include <asm/pgtable-nommu.h>

#else

#include <asm-generic/pgtable-nopud.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <asm/tlbflush.h>

#ifdef CONFIG_ARM_LPAE
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif

/*
 * An arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET		(8*1024*1024)
#define VMALLOC_START		(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END		0xff800000UL
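
/*
 * A worked example (editorial note, not part of the original header),
 * assuming a hypothetical high_memory of 0xc0000000:
 *
 *	(0xc0000000 + 0x00800000) & ~0x007fffff = 0xc0800000
 *
 * so VMALLOC_START lands on an 8MB boundary, leaving an 8MB guard hole
 * from 0xc0000000 to 0xc0800000.  When high_memory is not 8MB-aligned
 * the hole shrinks accordingly, but a guard gap always remains.
 */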

#define LIBRARY_TEXT_START	0x0c000000

#ifndef __ASSEMBLY__
extern void __pte_error(const char *file, int line, pte_t);
extern void __pmd_error(const char *file, int line, pmd_t);
extern void __pgd_error(const char *file, int line, pgd_t);

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte)
#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd)
#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd)

/*
 * This is the lowest virtual address at which we permit any user space
 * mapping.  This is particularly important for CPUs without high
 * vectors, where the exception vector page is mapped at virtual
 * address 0 and must not be handed out to user space.
 */
#define FIRST_USER_ADDRESS	(PAGE_SIZE * 2)

/*
 * Use TASK_SIZE as the ceiling argument for free_pgtables() and
 * free_pgd_range() to avoid freeing the modules pmd when LPAE is enabled (pmd
 * page shared between user and kernel).
 */
#ifdef CONFIG_ARM_LPAE
#define USER_PGTABLES_CEILING	TASK_SIZE
#endif

/*
 * The pgprot_* and protection_map entries will be fixed up at runtime
 * to include the cacheable and bufferable bits based on memory policy,
 * as well as any architecture dependent bits like global/ASID and SMP
 * shared mapping bits.
 */
#define _L_PTE_DEFAULT	L_PTE_PRESENT | L_PTE_YOUNG

extern pgprot_t		pgprot_user;
extern pgprot_t		pgprot_kernel;

#define _MOD_PROT(p, b)	__pgprot(pgprot_val(p) | (b))

#define PAGE_NONE		_MOD_PROT(pgprot_user, L_PTE_XN | L_PTE_RDONLY | L_PTE_NONE)
#define PAGE_SHARED		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
#define PAGE_SHARED_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER)
#define PAGE_COPY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_COPY_EXEC		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_READONLY		_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define PAGE_READONLY_EXEC	_MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
#define PAGE_KERNEL		_MOD_PROT(pgprot_kernel, L_PTE_XN)
#define PAGE_KERNEL_EXEC	pgprot_kernel

#define __PAGE_NONE		__pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
#define __PAGE_SHARED		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
#define __PAGE_SHARED_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER)
#define __PAGE_COPY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_COPY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
#define __PAGE_READONLY		__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY | L_PTE_XN)
#define __PAGE_READONLY_EXEC	__pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_RDONLY)
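
/*
 * An illustrative expansion (editorial note, not part of the original
 * header), assuming the runtime-initialised pgprot_user already carries
 * the memory-type bits chosen by the boot-time memory policy:
 *
 *	PAGE_SHARED
 *	  = _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_XN)
 *	  = __pgprot(pgprot_val(pgprot_user) | L_PTE_USER | L_PTE_XN)
 *
 * i.e. a user-accessible, writable (L_PTE_RDONLY clear), non-executable
 * mapping.  The __PAGE_* variants encode the same permissions but start
 * from the compile-time _L_PTE_DEFAULT, for use in the protection map
 * table below before the runtime fixups have run.
 */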

#define __pgprot_modify(prot,mask,bits)		\
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

#define pgprot_noncached(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE)

#define pgprot_stronglyordered(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED)

#define pgprot_device(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_DEV_SHARED | L_PTE_SHARED | L_PTE_DIRTY | L_PTE_XN)

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_BUFFERABLE | L_PTE_XN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);
#else
#define pgprot_dmacoherent(prot) \
	__pgprot_modify(prot, L_PTE_MT_MASK, L_PTE_MT_UNCACHED | L_PTE_XN)
#endif
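
/*
 * Typical use of these helpers (a minimal sketch, not from this header;
 * foo_mmap() and foo_pfn are hypothetical).  A driver mapping a
 * framebuffer-style region to user space would rewrite the memory type
 * of the VMA's protections before remapping:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *		return remap_pfn_range(vma, vma->vm_start, foo_pfn,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 *
 * Only the L_PTE_MT_* field is replaced; the permission bits of the
 * original protections are preserved.
 */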

#endif /* __ASSEMBLY__ */

/*
 * The table below defines the page protection levels that we insert into our
 * Linux page table version.  These get translated into the best that the
 * architecture can perform.  Note that on most ARM hardware:
 *  1) We cannot do execute protection
 *  2) If we could do execute protection, then read is implied
 *  3) Write implies read permissions
 */
#define __P000  __PAGE_NONE
#define __P001  __PAGE_READONLY
#define __P010  __PAGE_COPY
#define __P011  __PAGE_COPY
#define __P100  __PAGE_READONLY_EXEC
#define __P101  __PAGE_READONLY_EXEC
#define __P110  __PAGE_COPY_EXEC
#define __P111  __PAGE_COPY_EXEC

#define __S000  __PAGE_NONE
#define __S001  __PAGE_READONLY
#define __S010  __PAGE_SHARED
#define __S011  __PAGE_SHARED
#define __S100  __PAGE_READONLY_EXEC
#define __S101  __PAGE_READONLY_EXEC
#define __S110  __PAGE_SHARED_EXEC
#define __S111  __PAGE_SHARED_EXEC
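
/*
 * For reference (an editorial note, not in the original header): the
 * three digits in __Pxwr/__Sxwr are, left to right, the VM_EXEC,
 * VM_WRITE and VM_READ bits of a mapping, with __P* used for private
 * (copy-on-write) and __S* for shared mappings.  For example, a private
 * PROT_READ | PROT_WRITE mapping looks up __P011 and gets __PAGE_COPY:
 * the PTE is created read-only and the first write fault breaks COW.
 */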

#ifndef __ASSEMBLY__

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];

#define pmd_none(pmd)		(!pmd_val(pmd))

static inline pte_t *pmd_page_vaddr(pmd_t pmd)
{
	return __va(pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK);
}

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pte(pfn,prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page), prot)

#define pte_clear(mm,addr,ptep)	set_pte_ext(ptep, __pte(0), 0)

#define pte_isset(pte, val)	((u32)(val) == (val) ? pte_val(pte) & (val) \
						: !!(pte_val(pte) & (val)))
#define pte_isclear(pte, val)	(!(pte_val(pte) & (val)))
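
/*
 * An editorial note on pte_isset() (not in the original header): with
 * LPAE, pteval_t is 64-bit, and a flag above bit 31 would be lost if a
 * caller narrows the result of the bitwise AND to 32 bits.  The
 * "(u32)(val) == (val)" test folds at compile time, so flags in the low
 * 32 bits use the cheap raw AND, while higher flags take the "!!(...)"
 * form, which collapses the 64-bit result to 0 or 1 before any
 * narrowing can occur.
 */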

#define pte_none(pte)		(!pte_val(pte))
#define pte_present(pte)	(pte_isset((pte), L_PTE_PRESENT))
#define pte_valid(pte)		(pte_isset((pte), L_PTE_VALID))
#define pte_accessible(mm, pte)	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid(pte))
#define pte_write(pte)		(pte_isclear((pte), L_PTE_RDONLY))
#define pte_dirty(pte)		(pte_isset((pte), L_PTE_DIRTY))
#define pte_young(pte)		(pte_isset((pte), L_PTE_YOUNG))
#define pte_exec(pte)		(pte_isclear((pte), L_PTE_XN))

#define pte_valid_user(pte)	\
	(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))

static inline bool pte_access_permitted(pte_t pte, bool write)
{
	pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
	pteval_t needed = mask;

	if (write)
		mask |= L_PTE_RDONLY;

	return (pte_val(pte) & mask) == needed;
}
#define pte_access_permitted pte_access_permitted
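
/*
 * A worked example (editorial note, not in the original header): for a
 * write check, L_PTE_RDONLY is added to the mask but not to "needed",
 * so the test only passes when RDONLY is clear, i.e. the page is
 * writable:
 *
 *	pte bits: PRESENT | USER | RDONLY,  write = true
 *	mask   = PRESENT | USER | RDONLY
 *	needed = PRESENT | USER
 *	pte & mask = PRESENT | USER | RDONLY  !=  needed  ->  false
 *
 * The same PTE passes a read-only check (write = false), where RDONLY
 * is never inspected.
 */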

#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
}
#else
extern void __sync_icache_dcache(pte_t pteval);
#endif

void set_pte_at(struct mm_struct *mm, unsigned long addr,
		      pte_t *ptep, pte_t pteval);

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_mknexec(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(L_PTE_XN));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
		L_PTE_NONE | L_PTE_VALID;
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}
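
/*
 * Editorial note (not in the original header): pte_modify() only swaps
 * the permission bits listed in "mask"; state such as the memory type
 * and the DIRTY and YOUNG bits survives.  For example, an mprotect()
 * from PROT_READ|PROT_WRITE to PROT_READ conceptually performs:
 *
 *	pte = pte_modify(pte, __PAGE_READONLY);
 *
 * which sets L_PTE_RDONLY and L_PTE_XN while leaving the dirty and
 * accessed state of the old PTE intact.
 */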

/*
 * Encode and decode a swap entry.  Swap entries are stored in the Linux
 * page tables as follows:
 *
 *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
 *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 *   <--------------- offset ------------------------> < type -> 0 0
 *
 * This gives us up to 31 swap files and 128GB per swap file.  Note that
 * the offset field is always non-zero.
 */
#define __SWP_TYPE_SHIFT	2
298*4882a593Smuzhiyun #define __SWP_TYPE_SHIFT	2
299*4882a593Smuzhiyun #define __SWP_TYPE_BITS		5
300*4882a593Smuzhiyun #define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
301*4882a593Smuzhiyun #define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun #define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
304*4882a593Smuzhiyun #define __swp_offset(x)		((x).val >> __SWP_OFFSET_SHIFT)
305*4882a593Smuzhiyun #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
306*4882a593Smuzhiyun 
307*4882a593Smuzhiyun #define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
308*4882a593Smuzhiyun #define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
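
/*
 * A worked example (editorial note, not in the original header):
 * encoding swap type 3, offset 0x1234 with the macros above:
 *
 *	__swp_entry(3, 0x1234)
 *	  = (3 << 2) | (0x1234 << 7)
 *	  = 0xc | 0x91a00
 *	  = 0x91a0c
 *
 * Bits [1:0] stay zero, so the resulting PTE has L_PTE_PRESENT clear
 * and can never be mistaken for a valid mapping; __swp_type() and
 * __swp_offset() simply shift and mask the fields back out.
 */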

/*
 * It is an error for the kernel to have more swap files than we can
 * encode in the PTEs.  This ensures that we know when MAX_SWAPFILES
 * is increased beyond what we presently support.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
/* FIXME: this is not correct */
#define kern_addr_valid(addr)	(1)

/*
 * We provide our own arch_get_unmapped_area to cope with VIPT caches.
 */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_MMU */

#endif /* _ASMARM_PGTABLE_H */