/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *
 * Based on asm/pgtable-32.h from mips which is:
 *
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#ifndef _ASM_NIOS2_PGTABLE_H
#define _ASM_NIOS2_PGTABLE_H

#include <linux/io.h>
#include <linux/bug.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include <asm/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>

#define FIRST_USER_ADDRESS	0UL

#define VMALLOC_START		CONFIG_NIOS2_KERNEL_MMU_REGION_BASE
#define VMALLOC_END		(CONFIG_NIOS2_KERNEL_REGION_BASE - 1)

struct mm_struct;

/*
 * Helper macro: build a pgprot value for a present, cached page with
 * the given execute (x), write (w) and read (r) permissions.
 */
#define MKP(x, w, r) __pgprot(_PAGE_PRESENT | _PAGE_CACHED |	\
			      ((x) ? _PAGE_EXEC : 0) |		\
			      ((r) ? _PAGE_READ : 0) |		\
			      ((w) ? _PAGE_WRITE : 0))
/*
 * These are the macros that generic kernel code needs
 * (to populate protection_map[]).
 */

/* Remove W bit on private pages for COW support */
#define __P000	MKP(0, 0, 0)
#define __P001	MKP(0, 0, 1)
#define __P010	MKP(0, 0, 0)	/* COW */
#define __P011	MKP(0, 0, 1)	/* COW */
#define __P100	MKP(1, 0, 0)
#define __P101	MKP(1, 0, 1)
#define __P110	MKP(1, 0, 0)	/* COW */
#define __P111	MKP(1, 0, 1)	/* COW */
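
/*
 * For example, a private PROT_READ|PROT_WRITE mapping uses __P011,
 * which yields a read-only PTE here; the first write then faults and
 * is resolved by copying the page.
 */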

/* Shared pages can have exact HW mapping */
#define __S000	MKP(0, 0, 0)
#define __S001	MKP(0, 0, 1)
#define __S010	MKP(0, 1, 0)
#define __S011	MKP(0, 1, 1)
#define __S100	MKP(1, 0, 0)
#define __S101	MKP(1, 0, 1)
#define __S110	MKP(1, 1, 0)
#define __S111	MKP(1, 1, 1)

/* Used all over the kernel */
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
				 _PAGE_WRITE | _PAGE_EXEC | _PAGE_GLOBAL)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_CACHED | _PAGE_READ | \
				 _PAGE_WRITE | _PAGE_ACCESSED)

#define PAGE_COPY	MKP(0, 0, 1)

#define PGD_ORDER	0
#define PTE_ORDER	0

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))

#define PGDIR_SHIFT	22
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define USER_PTRS_PER_PGD	\
	(CONFIG_NIOS2_KERNEL_MMU_REGION_BASE / PGDIR_SIZE)
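
/*
 * With 4 KiB pages and 4-byte table entries this gives 1024 entries
 * per PGD and per PTE table, each PGD entry covering 4 MiB of virtual
 * address space (PGDIR_SHIFT == 22).
 */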

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pte_t invalid_pte_table[PAGE_SIZE/sizeof(pte_t)];

/*
 * (pmds are folded into puds so this doesn't actually get called,
 * but the define is needed for a generic inline function.)
 */
static inline void set_pmd(pmd_t *pmdptr, pmd_t pmdval)
{
	*pmdptr = pmdval;
}

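/* Accessors for the per-PTE status bits. */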
static inline int pte_write(pte_t pte)
	{ return pte_val(pte) & _PAGE_WRITE; }
static inline int pte_dirty(pte_t pte)
	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)
	{ return pte_val(pte) & _PAGE_ACCESSED; }

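/* Strip the cache bit from a protection value, e.g. for device mappings. */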
#define pgprot_noncached pgprot_noncached

static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
	unsigned long prot = pgprot_val(_prot);

	prot &= ~_PAGE_CACHED;

	return __pgprot(prot);
}

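/*
 * An entry counts as "none" when, apart from the global bit, only the
 * low four bits are set; see pte_clear() below.
 */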
static inline int pte_none(pte_t pte)
{
	return !(pte_val(pte) & ~(_PAGE_GLOBAL|0xf));
}

static inline int pte_present(pte_t pte)
	{ return pte_val(pte) & _PAGE_PRESENT; }

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

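/*
 * Replace only the access permission bits of a PTE; the page frame
 * number and the remaining flags are preserved.
 */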
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const unsigned long mask = _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC;

	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

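/*
 * A pmd is considered present unless it is empty or still points at
 * the shared invalid_pte_table placeholder.
 */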
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) != (unsigned long) invalid_pte_table)
			&& (pmd_val(pmd) != 0UL);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = (unsigned long) invalid_pte_table;
}

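/* The page frame number occupies the low 20 bits of a PTE. */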
#define pte_pfn(pte)		(pte_val(pte) & 0xfffff)
#define pfn_pte(pfn, prot)	(__pte((pfn) | pgprot_val(prot)))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * Store a linux PTE into the linux page table.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

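/*
 * Write the page's data back from the D-cache before the new
 * translation is installed.
 */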
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	unsigned long paddr = (unsigned long)page_to_virt(pte_page(pteval));

	flush_dcache_range(paddr, paddr + PAGE_SIZE);
	set_pte(ptep, pteval);
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) ==
		(unsigned long) invalid_pte_table) || (pmd_val(pmd) == 0UL);
}

#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)

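/*
 * Clear an entry by storing a value that pte_none() treats as empty:
 * only the low four bits, derived from the address, are set.
 */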
static inline void pte_clear(struct mm_struct *mm,
			     unsigned long addr, pte_t *ptep)
{
	pte_t null;

	pte_val(null) = (addr >> PAGE_SHIFT) & 0xf;

	set_pte_at(mm, addr, ptep, null);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page, prot)	(pfn_pte(page_to_pfn(page), prot))

#define pmd_phys(pmd)		virt_to_phys((void *)pmd_val(pmd))
#define pmd_page(pmd)		(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return pmd_val(pmd);
}

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", \
		__FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", \
		__FILE__, __LINE__, pgd_val(e))

/*
 * Encode and decode a swap entry (must be !pte_none(pte) && !pte_present(pte)):
 *
 * 31 30 29 28 27 26 25 24 23 22 21 20 19 18 ...  1  0
 *  0  0  0  0 type.  0  0  0  0  0  0 offset.........
 *
 * This gives us up to 2**2 = 4 swap files and 2**20 * 4K = 4G per swap file.
 *
 * Note that the offset field is always non-zero, thus !pte_none(pte) is always
 * true.
 */
#define __swp_type(swp)		(((swp).val >> 26) & 0x3)
#define __swp_offset(swp)	((swp).val & 0xfffff)
#define __swp_entry(type, off)	((swp_entry_t) { (((type) & 0x3) << 26) \
						 | ((off) & 0xfffff) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })

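/*
 * For example (illustrative only), the encoding round-trips:
 *
 *	swp_entry_t swp = __swp_entry(1, 0x123);
 *	pte_t pte = __swp_entry_to_pte(swp);
 *
 * __swp_type() and __swp_offset() applied to __pte_to_swp_entry(pte)
 * then yield 1 and 0x123 again.
 */
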
#define kern_addr_valid(addr)		(1)

extern void __init paging_init(void);
extern void __init mmu_init(void);

extern void update_mmu_cache(struct vm_area_struct *vma,
			unsigned long address, pte_t *pte);

#endif /* _ASM_NIOS2_PGTABLE_H */