/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H
#define _ASM_POWERPC_NOHASH_32_PGTABLE_H

#include <asm-generic/pgtable-nopmd.h>

#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/threads.h>
#include <asm/mmu.h>			/* For sub-arch specific PPC_PIN_SIZE */

#ifdef CONFIG_44x
extern int icache_44x_need_flush;
#endif

#endif /* __ASSEMBLY__ */

#define PTE_INDEX_SIZE	PTE_SHIFT
#define PMD_INDEX_SIZE	0
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	(32 - PGDIR_SHIFT)

#define PMD_CACHE_INDEX	PMD_INDEX_SIZE
#define PUD_CACHE_INDEX	PUD_INDEX_SIZE

#ifndef __ASSEMBLY__
#define PTE_TABLE_SIZE	(sizeof(pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	0
#define PUD_TABLE_SIZE	0
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1)
#endif	/* __ASSEMBLY__ */

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/*
 * The normal case is that PTEs are 32-bits and we have a 1-page
 * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages.  -- paulus
 *
 * For any >32-bit physical address platform, we can use the following
 * two level page table layout where the pgdir is 8KB and the MS 13 bits
 * are an index to the second level table.  The combined pgdir/pmd first
 * level has 2048 entries and the second level has 512 64-bit PTE entries.
 * -Matt
 */
/* PGDIR_SHIFT determines what a top-level page table entry can map */
#define PGDIR_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
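
/*
 * Worked example (illustrative, assuming 4K pages with 32-bit PTEs, where
 * PTE_SHIFT is 10): PGDIR_SHIFT = 12 + 10 = 22, so each top-level entry
 * maps a 4MB region (PGDIR_SIZE = 0x400000), and PTRS_PER_PGD =
 * 1 << (32 - 22) = 1024, matching the "1-page 1024-entry pgdir" case
 * described in the comment above.
 */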

/* Bits to mask out from a PGD to get to the PUD page */
#define PGD_MASKED_BITS		0

#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \
		(unsigned long long)pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#ifndef __ASSEMBLY__

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* !__ASSEMBLY__ */


/*
 * This is the bottom of the PKMAP area with HIGHMEM, or an arbitrary
 * value (for now) on others, from where we can start laying out the
 * kernel virtual space that goes below PKMAP and FIXMAP.
 */
#include <asm/fixmap.h>

/*
 * ioremap_bot starts at that address. Early ioremaps move down from there,
 * until mem_init(), at which point this becomes the top of the vmalloc
 * and ioremap space.
 */
#ifdef CONFIG_HIGHMEM
#define IOREMAP_TOP	PKMAP_BASE
#else
#define IOREMAP_TOP	FIXADDR_START
#endif

/* PPC32 shares the vmalloc area with ioremap */
#define IOREMAP_START	VMALLOC_START
#define IOREMAP_END	VMALLOC_END

/*
 * Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 16MB value just means that there will be a 64MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 *
 * We no longer map larger than phys RAM with the BATs so we don't have
 * to worry about the VMALLOC_OFFSET causing problems.  We do have to worry
 * about clashes between our early calls to ioremap() that start growing down
 * from IOREMAP_TOP being run into the VM area allocations (growing upwards
 * from VMALLOC_START).  For this reason we have ioremap_bot to check when
 * we actually run into our mappings setup in the early boot with the VM
 * system.  This really does become a problem for machines with good amounts
 * of RAM.  -- Cort
 */
#define VMALLOC_OFFSET (0x1000000) /* 16M */
#ifdef PPC_PIN_SIZE
#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#else
#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)))
#endif

#ifdef CONFIG_KASAN_VMALLOC
#define VMALLOC_END	ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
#define VMALLOC_END	ioremap_bot
#endif
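
/*
 * Worked example (illustrative, assuming a hypothetical high_memory of
 * 0xc7800000 and no PPC_PIN_SIZE): VMALLOC_START =
 * (0xc7800000 + 0x1000000) & ~(0x1000000 - 1) = 0xc8000000, i.e.
 * high_memory plus VMALLOC_OFFSET, rounded down to a 16MB boundary.
 */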

/*
 * Bits in a linux-style PTE.  These match the bits in the
 * (hardware-defined) PowerPC PTE as closely as possible.
 */

#if defined(CONFIG_40x)
#include <asm/nohash/32/pte-40x.h>
#elif defined(CONFIG_44x)
#include <asm/nohash/32/pte-44x.h>
#elif defined(CONFIG_FSL_BOOKE) && defined(CONFIG_PTE_64BIT)
#include <asm/nohash/pte-book3e.h>
#elif defined(CONFIG_FSL_BOOKE)
#include <asm/nohash/32/pte-fsl-booke.h>
#elif defined(CONFIG_PPC_8xx)
#include <asm/nohash/32/pte-8xx.h>
#endif

/*
 * Location of the PFN in the PTE. Most 32-bit platforms use the same
 * value as PAGE_SHIFT here (ie, naturally aligned).
 * Platforms that don't simply pre-define the value, so we don't
 * override it here.
 */
#ifndef PTE_RPN_SHIFT
#define PTE_RPN_SHIFT	(PAGE_SHIFT)
#endif

/*
 * The mask covered by the RPN must be a ULL on 32-bit platforms with
 * 64-bit PTEs.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT)
#define PTE_RPN_MASK	(~((1ULL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 36
#else
#define PTE_RPN_MASK	(~((1UL << PTE_RPN_SHIFT) - 1))
#define MAX_POSSIBLE_PHYSMEM_BITS 32
#endif
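
/*
 * Example (illustrative): with PTE_RPN_SHIFT == 12 and 64-bit PTEs,
 * PTE_RPN_MASK is ~0xfffULL, so the RPN field can carry a frame number
 * for a physical address of up to 36 bits, while the low 12 bits stay
 * free for the status bits defined by the per-platform headers above.
 */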

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)

#ifndef __ASSEMBLY__

#define pte_clear(mm, addr, ptep) \
	do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0)

#ifndef pte_mkwrite
static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}
#endif

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}
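
/*
 * Usage sketch (illustrative only): these helpers are pure functions on
 * the PTE value and compose freely, e.g.
 *
 *	pte_t pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(old_pte)));
 *
 * builds a writable, dirty, accessed copy of old_pte without touching
 * the page table itself; only pte_update()/set_pte_at() write it back.
 */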

#define pmd_none(pmd)		(!pmd_val(pmd))
#define	pmd_bad(pmd)		(pmd_val(pmd) & _PMD_BAD)
#define	pmd_present(pmd)	(pmd_val(pmd) & _PMD_PRESENT_MASK)
static inline void pmd_clear(pmd_t *pmdp)
{
	*pmdp = __pmd(0);
}

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 *
 * On the 8xx, the page tables are a bit special. For 16k pages, we have
 * 4 identical entries. For 512k pages, we have 128 entries as if it was
 * 4k pages, but they are flagged as 512k pages for the hardware.
 * For other page sizes, we have a single entry in the table.
 */
#ifdef CONFIG_PPC_8xx
static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr);
static int hugepd_ok(hugepd_t hpd);

static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge)
{
	if (!huge)
		return PAGE_SIZE / SZ_4K;
	else if (hugepd_ok(*((hugepd_t *)pmd)))
		return 1;
	else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE))
		return SZ_16K / SZ_4K;
	else
		return SZ_512K / SZ_4K;
}
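
/*
 * Example (illustrative, following the 8xx layout described above): a
 * normal mapping with 16K kernel pages yields PAGE_SIZE / SZ_4K = 4
 * identical cells, a 512K mapping yields SZ_512K / SZ_4K = 128 cells,
 * and a mapping behind a huge page directory is a single cell, so
 * pte_update() below replicates the new value across 'num' slots.
 */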

static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t *entry = &p->pte;
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;
	int num, i;
	pmd_t *pmd = pmd_off(mm, addr);

	num = number_of_cells_per_pte(pmd, new, huge);

	for (i = 0; i < num; i++, entry++, new += SZ_4K)
		*entry = new;

	return old;
}

#ifdef CONFIG_PPC_16K_PAGES
#define __HAVE_ARCH_PTEP_GET
static inline pte_t ptep_get(pte_t *ptep)
{
	pte_basic_t val = READ_ONCE(ptep->pte);
	pte_t pte = {val, val, val, val};

	return pte;
}
#endif /* CONFIG_PPC_16K_PAGES */
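
/*
 * Note (illustrative): with 16K pages, pte_t holds four identical 4K
 * cells (see the 8xx comment above pte_update()), so ptep_get() can
 * rebuild a full pte_t from a single READ_ONCE() of the first cell,
 * avoiding a torn read across the four copies, e.g.
 *
 *	pte_t pte = ptep_get(ptep);
 *	int young = pte_young(pte);
 */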

#else
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;

	*p = __pte(new);

#ifdef CONFIG_44x
	if ((old & _PAGE_USER) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;
#endif
	return old;
}
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
					      unsigned long addr, pte_t *ptep)
{
	unsigned long old;
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
	return (old & _PAGE_ACCESSED) != 0;
}
#define ptep_test_and_clear_young(__vma, __addr, __ptep) \
	__ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep)
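
/*
 * Usage sketch (illustrative only, with a hypothetical caller action):
 *
 *	if (ptep_test_and_clear_young(vma, addr, ptep))
 *		note_page_referenced();
 *
 * pte_update() returns the old PTE value, so the _PAGE_ACCESSED bit is
 * read and cleared in one update rather than a separate read and write.
 */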

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0, 0, 0));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0)));
	unsigned long set = pte_val(pte_wrprotect(__pte(0)));

	pte_update(mm, addr, ptep, clr, set, 0);
}
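
/*
 * How the clr/set pair above is derived (illustrative): applying
 * pte_wrprotect() to an all-ones PTE reveals which bits it clears
 * (the inverse of the result), and applying it to an all-zeroes PTE
 * reveals which bits it sets. That lets this code reuse the platform's
 * pte_wrprotect() without knowing its bit layout.
 */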

static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	pte_t pte_set = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(0)))));
	pte_t pte_clr = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte_mkexec(__pte(~0)))));
	unsigned long set = pte_val(entry) & pte_val(pte_set);
	unsigned long clr = ~pte_val(entry) & ~pte_val(pte_clr);
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, clr, set, huge);

	flush_tlb_page(vma, address);
}
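
/*
 * The same probing trick as in ptep_set_wrprotect() (illustrative):
 * applying the pte_mk*() helpers to an all-zeroes PTE yields the bits
 * they set, and applying them to an all-ones PTE shows (as zeroes) the
 * bits they clear. 'set' then keeps only the set-bits also present in
 * 'entry', and 'clr' only the cleared-bits also absent from 'entry',
 * so just the requested access/dirty/write/exec changes are applied.
 */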

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

#define __HAVE_ARCH_PTE_SAME
#define pte_same(A,B)	((pte_val(A) ^ pte_val(B)) == 0)

/*
 * Note that on Book E processors, the pmd contains the kernel virtual
 * (lowmem) address of the pte page.  The physical address is less useful
 * because everything runs with translation enabled (even the TLB miss
 * handler).  On everything else the pmd contains the physical address
 * of the pte page.  -- paulus
 */
#ifndef CONFIG_BOOKE
#define pmd_page(pmd)		\
	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#else
#define pmd_page_vaddr(pmd)	\
	((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1)))
#define pmd_page(pmd)		\
	pfn_to_page((__pa(pmd_val(pmd)) >> PAGE_SHIFT))
#endif

/*
 * Encode and decode a swap entry.
 * Note that the bits we use in a PTE for representing a swap entry
 * must not include the _PAGE_PRESENT bit.
 *   -- paulus
 */
#define __swp_type(entry)		((entry).val & 0x1f)
#define __swp_offset(entry)		((entry).val >> 5)
#define __swp_entry(type, offset)	((swp_entry_t) { (type) | ((offset) << 5) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) >> 3 })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val << 3 })
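
/*
 * Worked example (illustrative): __swp_entry(2, 100) packs type 2 into
 * the low 5 bits and offset 100 above them, giving val =
 * 2 | (100 << 5) = 0xc82; __swp_entry_to_pte() shifts that to 0x6410,
 * leaving the low three PTE bits clear for flags such as _PAGE_PRESENT,
 * and __pte_to_swp_entry()/__swp_type()/__swp_offset() recover 2 and 100.
 */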

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_POWERPC_NOHASH_32_PGTABLE_H */