/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/*  asm/pgtable.h:  Defines and functions used to work
 *                        with Sparc page tables.
 *
 *  Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 *  Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#define PMD_SHIFT		18
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)

#define PGDIR_SHIFT		24
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))
#define PGDIR_ALIGN(__addr)	(((__addr) + ~PGDIR_MASK) & PGDIR_MASK)
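
/*
 * Editorial sketch (not part of the original header): with PAGE_SHIFT == 12
 * these shifts split a 32-bit virtual address into 8 + 6 + 6 + 12 bits:
 *
 *	pgd index: va >> 24		(256 entries)
 *	pmd index: (va >> 18) & 63	(64 entries)
 *	pte index: (va >> 12) & 63	(64 entries)
 *
 * The *_ALIGN() macros round an address up to the next 256 KiB (PMD) or
 * 16 MiB (PGDIR) boundary, e.g. PMD_ALIGN(0x40001) == 0x80000.
 */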

#ifndef __ASSEMBLY__
#include <asm-generic/pgtable-nopud.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>


struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)   __builtin_trap()
#define pmd_ERROR(e)   __builtin_trap()
#define pgd_ERROR(e)   __builtin_trap()

#define PTRS_PER_PTE		64
#define PTRS_PER_PMD		64
#define PTRS_PER_PGD		256
#define USER_PTRS_PER_PGD	PAGE_OFFSET / PGDIR_SIZE
#define FIRST_USER_ADDRESS	0UL
#define PTE_SIZE		(PTRS_PER_PTE*4)
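
/*
 * Editorial note (not in the original source): a PTE table is only
 * PTRS_PER_PTE * 4 == 256 bytes, so sixteen of them fit in one 4 KiB page;
 * the srmmu code carves them out of its own nocache pool (an assumption
 * based on mm/srmmu.c) rather than handing out whole pages.
 */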

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/* Protection maps; the __P/__S suffix bits are execute, write, read (xwr) */
#define __P000  PAGE_NONE
#define __P001  PAGE_READONLY
#define __P010  PAGE_COPY
#define __P011  PAGE_COPY
#define __P100  PAGE_READONLY
#define __P101  PAGE_READONLY
#define __P110  PAGE_COPY
#define __P111  PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED

/* The first physical page can be anywhere; the following is needed so that
 * va-->pa and vice versa conversions work properly without a performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];

#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}
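
/*
 * Editorial sketch (not in the original source): srmmu_swap() is the atomic
 * equivalent of
 *
 *	old = *addr; *addr = value; return old;
 *
 * done in a single V8 swap instruction, so an MMU update of the ref/mod
 * bits cannot be lost between the load and the store.
 */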

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}
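
/*
 * Editorial note (not in the original source): a nonzero top nibble means
 * the entry refers to one of the I/O spaces encoded by mk_pte_io() and
 * MK_IOSPACE_PFN() below, not to RAM, so no struct page stands behind it.
 */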

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long __pmd_page(pmd_t pmd)
{
	unsigned long v;

	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();

	v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	unsigned long v = pmd_val(pmd) & SRMMU_PTD_PMASK;
	return (unsigned long)__nocache_va(v << 4);
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	if (srmmu_device_memory(pud_val(pud))) {
		return ~0;
	} else {
		unsigned long v = pud_val(pud) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}
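
/*
 * Editorial note (assumption based on mm/srmmu.c): page table descriptors
 * store the physical address of the next-level table shifted right by 4,
 * hence the `v << 4` above; the tables themselves live in the uncached
 * srmmu nocache pool, which is why __nocache_va() is used instead of
 * plain __va().
 */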

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pte((pte_t *)&pmd_val(*pmdp), __pte(0));
}

static inline int pud_none(pud_t pud)
{
	return !(pud_val(pud) & 0xFFFFFFF);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_val(pud) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pud_present(pud_t pud)
{
	return ((pud_val(pud) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pud_clear(pud_t *pudp)
{
	set_pte((pte_t *)pudp, __pte(0));
}

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pfn_pte(pfn, prot)		mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}
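
/*
 * Editorial sketch (not in the original source): SRMMU PTEs store the
 * physical address shifted right by 4, which is why the pfn is recovered
 * with (PAGE_SHIFT - 4) instead of PAGE_SHIFT.  With PAGE_SHIFT == 12:
 *
 *	pte_val(pte) == (paddr >> 4) | protection bits
 *	pte_pfn(pte) == (pte_val(pte) & SRMMU_PTE_PMASK) >> 8 == paddr >> 12
 */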

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
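
/*
 * Editorial example (assumed values, not in the original source): mapping
 * physical 0x08000000 in I/O space 2 yields
 *
 *	(0x08000000 >> 4) | (2 << 28) | prot == 0x20800000 | prot
 *
 * so the 4-bit space number lands in the same top nibble that
 * srmmu_device_memory() tests.
 */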

#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	pgprot_val(prot) &= ~pgprot_val(__pgprot(SRMMU_CACHE));
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
                      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}
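
/*
 * Editorial round-trip sketch (assuming the SRMMU_SWP_* layout from
 * asm/pgtsrmmu.h):
 *
 *	swp_entry_t e = __swp_entry(type, offset);
 *	__swp_type(e)   == (type & SRMMU_SWP_TYPE_MASK);
 *	__swp_offset(e) == (offset & SRMMU_SWP_OFF_MASK);
 */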

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))
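
/*
 * Editorial note (not in the original source): the bitmap is indexed by
 * physical address >> 20, so each bit covers one 1 MiB chunk of physical
 * address space.
 */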

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)
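
/*
 * Editorial round-trip sketch (BITS_PER_LONG == 32 on sparc32):
 *
 *	unsigned long p = MK_IOSPACE_PFN(2UL, 0x123UL);
 *	GET_IOSPACE(p) == 2;		// top 4 bits
 *	GET_PFN(p)     == 0x123;	// low 28 bits
 */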

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({									  \
	int __changed = !pte_same(*(__ptep), __entry);			  \
	if (__changed) {						  \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address);			  \
	}								  \
	__changed;							  \
})
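
/*
 * Editorial note (not in the original source): the TLB flush is issued only
 * when the PTE actually changed, so the common no-change fault path stays
 * cheap.
 */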

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* !(_SPARC_PGTABLE_H) */