/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H
#define _ASM_POWERPC_BOOK3S_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#include <asm/book3s/32/hash.h>

/* And here we include common definitions */

#define _PAGE_KERNEL_RO		0
#define _PAGE_KERNEL_ROX	(_PAGE_EXEC)
#define _PAGE_KERNEL_RW		(_PAGE_DIRTY | _PAGE_RW)
#define _PAGE_KERNEL_RWX	(_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC)

#define _PAGE_HPTEFLAGS _PAGE_HASHPTE

#ifndef __ASSEMBLY__

static inline bool pte_user(pte_t pte)
{
	return pte_val(pte) & _PAGE_USER;
}
#endif /* __ASSEMBLY__ */

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't simply pre-define the value, so we don't
 * override it here.
 */
#define PTE_RPN_SHIFT	(PAGE_SHIFT)

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#ifdef CONFIG_PTE_64BIT
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
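
/*
 * Worked example (editorial sketch, assuming 4K pages so PTE_RPN_SHIFT
 * is 12): with 64-bit PTEs, PTE_RPN_MASK is ~0xfffULL, so a PTE mapping
 * the 36-bit physical address 0x240003000 stores PFN 0x240003 in the
 * RPN field and keeps bits 0-11 free for the flag bits defined above.
 * With 32-bit PTEs the same split limits physical addresses to 32 bits.
 */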

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HASHPTE | _PAGE_DIRTY | \
			 _PAGE_ACCESSED | _PAGE_SPECIAL)

/*
 * We define 2 sets of base prot bits, one for basic pages (ie,
 * cacheable kernel and user pages) and one for non-cacheable
 * pages. We always set _PAGE_COHERENT when SMP is enabled or
 * the processor might need it for DMA coherency.
 */
#define _PAGE_BASE_NC	(_PAGE_PRESENT | _PAGE_ACCESSED)
#define _PAGE_BASE	(_PAGE_BASE_NC | _PAGE_COHERENT)

/*
 * Permission masks used to generate the __P and __S tables.
 *
 * Note: __pgprot is defined in arch/powerpc/include/asm/page.h
 *
 * Write permissions imply read permissions for now.
 */
#define PAGE_NONE	__pgprot(_PAGE_BASE)
#define PAGE_SHARED	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
#define PAGE_SHARED_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
#define PAGE_COPY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_COPY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
#define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
#define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | \
				 _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)

/*
 * Protection used for kernel text. We want the debuggers to be able to
 * set breakpoints anywhere, so don't write protect the kernel text
 * on platforms where such control is possible.
 */
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\
	defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE)
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_X
#else
#define PAGE_KERNEL_TEXT	PAGE_KERNEL_ROX
#endif

/* Make module code happy. We don't set RO yet */
#define PAGE_KERNEL_EXEC	PAGE_KERNEL_X

/* Advertise special mapping type for AGP */
#define PAGE_AGP		(PAGE_KERNEL_NC)
#define HAVE_PAGE_AGP

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS		(PTE_TABLE_SIZE - 1)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
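
/*
 * Sanity-check arithmetic (editorial sketch for the usual 4K-page,
 * 32-bit-PTE configuration, where PTE_SHIFT is 10): PTRS_PER_PTE and
 * PTRS_PER_PGD are both 1024, PGDIR_SHIFT is 22, and each PGD entry
 * maps a PGDIR_SIZE of 4MB. Assuming the typical TASK_SIZE of
 * 0xc0000000, USER_PTRS_PER_PGD is 768, ie the low 3GB of the PGD
 * covers user space.
 */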

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* !__ASSEMBLY__ */

/*
 * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */
#include <asm/fixmap.h>

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif

/* PPC32 shares the vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 16MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from ioremap_base being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */

#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif

#ifdef CONFIG_STRICT_KERNEL_RWX
#define MODULES_END	ALIGN_DOWN(PAGE_OFFSET, SZ_256M)
#define MODULES_VADDR	(MODULES_END - SZ_256M)
#endif

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/*
 * Bits in a Linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0); } while (0)

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}


/*
 * When flushing the tlb entry for a page, we also need to flush the hash
 * table entry.  flush_hash_pages is assembler (for speed) in hashtable.S.
 */
extern int flush_hash_pages(unsigned context, unsigned long va,
			    unsigned long pmdval, int count);

/* Add an HPTE to the hash table */
extern void add_hash_page(unsigned context, unsigned long va,
			  unsigned long pmdval);

/* Flush an entry from the TLB/hash table */
extern void flush_hash_entry(struct mm_struct *mm, pte_t *ptep,
			     unsigned long address);

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 */
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old;
	unsigned long tmp;

	__asm__ __volatile__(
#ifndef CONFIG_PTE_64BIT
"1:	lwarx	%0, 0, %3\n"
"	andc	%1, %0, %4\n"
#else
"1:	lwarx	%L0, 0, %3\n"
"	lwz	%0, -4(%3)\n"
"	andc	%1, %L0, %4\n"
#endif
"	or	%1, %1, %5\n"
"	stwcx.	%1, 0, %3\n"
"	bne-	1b"
	: "=&r" (old), "=&r" (tmp), "=m" (*p)
#ifndef CONFIG_PTE_64BIT
	: "r" (p),
#else
	: "b" ((unsigned long)(p) + 4),
#endif
	  "r" (clr), "r" (set), "m" (*p)
	: "cc" );

	return old;
}
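
/*
 * Usage sketch (illustrative, not called from this header): harvesting
 * and clearing the dirty bit of a valid PTE atomically would be
 *
 *	old = pte_update(mm, addr, ptep, _PAGE_DIRTY, 0, 0);
 *	dirty = (old & _PAGE_DIRTY) != 0;
 *
 * ie 'clr' names the bits to clear, 'set' the bits to set, and the
 * return value is the PTE as it was before the update.
 */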

/*
 * 2.6 calls this without flushing the TLB entry; this is wrong
 * for our hash-based implementation, so we fix that up here.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	if (old & _PAGE_HASHPTE) {
		unsigned long ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(mm->context.id, addr, ptephys, 1);
	}
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, 0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);

	pte_update(vma->vm_mm, address, ptep, 0, set, 0);

	flush_tlb_page(vma, address);
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)

#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit or the _PAGE_HASHPTE bit (if used).
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
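
/*
 * Round-trip sketch (editorial, using the bit layout from hash.h):
 * __swp_entry(2, 0x10) packs to val 0x202 (5-bit type in the low bits,
 * offset above), and __swp_entry_to_pte() shifts it left by 3 to give a
 * PTE value of 0x1010, which can never have _PAGE_PRESENT (0x001) or
 * _PAGE_HASHPTE (0x002) set, as required by the note above.
 */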

/* Generic accessors to PTE bits */
static inline int pte_write(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_RW);}
static inline int pte_read(pte_t pte)		{ return 1; }
static inline int pte_dirty(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_DIRTY); }
static inline int pte_young(pte_t pte)		{ return !!(pte_val(pte) & _PAGE_ACCESSED); }
static inline int pte_special(pte_t pte)	{ return !!(pte_val(pte) & _PAGE_SPECIAL); }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hashpte(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_HASHPTE);
}

static inline bool pte_ci(pte_t pte)
{
	return !!(pte_val(pte) & _PAGE_NO_CACHE);
}

/*
 * We only find page table entries in the last level,
 * hence no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_USER bit;
	 * read permission is implied for write and execute.
	 */
	if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot));
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return pte_val(pte) >> PTE_RPN_SHIFT;
}
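
/*
 * Editorial note: these two are inverses over the RPN field, so for any
 * pfn, pte_pfn(pfn_pte(pfn, prot)) == pfn, with the protection bits
 * living below PTE_RPN_SHIFT. The (pte_basic_t) cast matters with
 * 64-bit PTEs, where the shifted PFN can exceed the low 32 bits.
 */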

/* Generic modifiers for PTE bits */
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}

static inline pte_t pte_mkpte(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkprivileged(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_USER);
}

static inline pte_t pte_mkuser(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_USER);
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
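
/*
 * Illustration (editorial): pte_modify() is what generic code such as
 * mprotect() ends up using. Because _PAGE_CHG_MASK keeps the RPN,
 * _PAGE_HASHPTE, _PAGE_DIRTY, _PAGE_ACCESSED and _PAGE_SPECIAL, e.g.
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * drops _PAGE_RW without losing the dirty/young tracking state.
 */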

/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
#if defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
	 * helper pte_update() which does an atomic update. We need to do that
	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
	 * the hash bits instead (ie, same as the non-SMP case).
	 */
	if (percpu)
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
	else
		pte_update(mm, addr, ptep, ~_PAGE_HASHPTE, pte_val(pte), 0);

#elif defined(CONFIG_PTE_64BIT)
	/* Second case is 32-bit with 64-bit PTE.  In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between. This is possible because we take care,
	 * in the hash code, to pre-invalidate if the PTE was already hashed,
	 * which synchronizes us with any concurrent invalidation.
	 * In the percpu case, we also fall back to the simple update preserving
	 * the hash bits.
	 */
	if (percpu) {
		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
			      | (pte_val(pte) & ~_PAGE_HASHPTE));
		return;
	}
	if (pte_val(*ptep) & _PAGE_HASHPTE)
		flush_hash_entry(mm, ptep, addr);
	__asm__ __volatile__("\
		stw%X0 %2,%0\n\
		eieio\n\
		stw%X1 %L2,%1"
	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
	: "r" (pte) : "memory");

#else
	/* Third case is 32-bit hash table in UP mode. We need to preserve
	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
	 * translation in the hash yet (done in a subsequent flush_tlb_xxx()),
	 * so we need to keep track that this PTE needs invalidating.
	 */
	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
		      | (pte_val(pte) & ~_PAGE_HASHPTE));
#endif
}

/*
 * Helpers to mark a page protection value as "uncacheable" and to set
 * the other cache attributes.
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE | _PAGE_GUARDED);
}

#define pgprot_noncached_wc pgprot_noncached_wc
static inline pgprot_t pgprot_noncached_wc(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_NO_CACHE);
}

#define pgprot_cached pgprot_cached
static inline pgprot_t pgprot_cached(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT);
}

#define pgprot_cached_wthru pgprot_cached_wthru
static inline pgprot_t pgprot_cached_wthru(pgprot_t prot)
{
	return __pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) |
			_PAGE_COHERENT | _PAGE_WRITETHRU);
}

#define pgprot_cached_noncoherent pgprot_cached_noncoherent
static inline pgprot_t pgprot_cached_noncoherent(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL);
}

#define pgprot_writecombine pgprot_writecombine
static inline pgprot_t pgprot_writecombine(pgprot_t prot)
{
	return pgprot_noncached_wc(prot);
}
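
/*
 * Typical consumers (editorial note): on powerpc, plain ioremap() maps
 * with pgprot_noncached() attributes (no-cache + guarded), while
 * ioremap_wc() uses pgprot_noncached_wc(), ie no-cache without the
 * guard bit, which permits write combining.
 */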

#endif /* !__ASSEMBLY__ */

#endif /*  _ASM_POWERPC_BOOK3S_32_PGTABLE_H */