/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * vineetg: May 2011
 *  -Folded PAGE_PRESENT (used by VM) and PAGE_VALID (used by MMU) into 1.
 *   They are semantically the same although in different contexts:
 *   VALID marks that a TLB entry exists, which can only happen if PRESENT
 *  -Utilise some unused free bits to confine PTE flags to 12 bits:
 *   this is a must for 4k pg-sz
 *
 * vineetg: Mar 2011 - changes to accommodate MMU TLB Page Descriptor mods
 *  -TLB Locking never really existed, except for initial specs
 *  -SILENT_xxx not needed for our port
 *  -Per my request, MMU V3 changes the layout of some of the bits
 *   to avoid a few shifts in TLB Miss handlers.
 *
 * vineetg: April 2010
 *  -PGD entry no longer contains any flags. If empty it is 0, otherwise has
 *   Pg-Tbl ptr. Thus pmd_present(), pmd_valid(), pmd_set() become simpler
 *
 * vineetg: April 2010
 *  -Switched from 8:11:13 split for page table lookup to 11:8:13
 *  -This speeds up page table allocation itself as we now have to memset 1K
 *   instead of 8K per page table.
 *  -TODO: Right now page table alloc is 8K and the remaining 7K is unused;
 *   need to optimise it
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */

#ifndef _ASM_ARC_PGTABLE_H
#define _ASM_ARC_PGTABLE_H

#include <linux/bits.h>
#include <asm-generic/pgtable-nopmd.h>
#include <asm/page.h>
#include <asm/mmu.h>	/* to propagate CONFIG_ARC_MMU_VER <n> */

/**************************************************************************
 * Page Table Flags
 *
 * ARC700 MMU only deals with software managed TLB entries.
 * Page Tables are purely for Linux VM's consumption and the bits below are
 * suited to that (uniqueness). Hence some are not implemented in the TLB and
 * some have different value in TLB.
 * e.g. MMU v2: K_READ bit is 8 and so is GLOBAL (possible because they live
 *      in separate PD0 and PD1, which combined form a translation entry),
 *      while from the PTE perspective, they are 8 and 9 respectively;
 * with MMU v3: most bits (except SHARED) represent the exact hardware pos
 *      (saves some bit shift ops in TLB Miss handlers)
 */

#if (CONFIG_ARC_MMU_VER <= 2)

#define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
#define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
#define _PAGE_DIRTY         (1<<6)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<7)
#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<10)	/* TLB entry is valid (H) */

#else	/* MMU v3 onwards */

#define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
#define _PAGE_ACCESSED      (1<<4)	/* Page is accessed (S) */
#define _PAGE_DIRTY         (1<<5)	/* Page modified (dirty) (S) */
#define _PAGE_SPECIAL       (1<<6)

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_WTHRU         (1<<7)	/* Page cache mode write-thru (H) */
#endif

#define _PAGE_GLOBAL        (1<<8)	/* Page is global (H) */
#define _PAGE_PRESENT       (1<<9)	/* TLB entry is valid (H) */

#if (CONFIG_ARC_MMU_VER >= 4)
#define _PAGE_HW_SZ         (1<<10)	/* Page Size indicator (H): 0 normal, 1 super */
#endif

#define _PAGE_SHARED_CODE   (1<<11)	/* Shared Code page with cmn vaddr
					   usable for shared TLB entries (H) */

#define _PAGE_UNUSED_BIT    (1<<12)
#endif

/* vmalloc permissions */
#define _K_PAGE_PERMS	(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
			 _PAGE_GLOBAL | _PAGE_PRESENT)

#ifndef CONFIG_ARC_CACHE_PAGES
#undef _PAGE_CACHEABLE
#define _PAGE_CACHEABLE 0
#endif

#ifndef _PAGE_HW_SZ
#define _PAGE_HW_SZ	0
#endif

/* Defaults for every user page */
#define ___DEF		(_PAGE_PRESENT | _PAGE_CACHEABLE)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK_PHYS | _PAGE_ACCESSED | _PAGE_DIRTY | \
			 _PAGE_SPECIAL)
/* More abbreviated helpers */
#define PAGE_U_NONE	__pgprot(___DEF)
#define PAGE_U_R	__pgprot(___DEF | _PAGE_READ)
#define PAGE_U_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE)
#define PAGE_U_X_R	__pgprot(___DEF | _PAGE_READ | _PAGE_EXECUTE)
#define PAGE_U_X_W_R	__pgprot(___DEF | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_EXECUTE)

#define PAGE_SHARED	PAGE_U_W_R

/* While kernel runs out of untranslated space, vmalloc/modules use a chunk of
 * user vaddr space - visible in all addr spaces, but kernel mode only
 * Thus Global, all-kernel-access, no-user-access, cached
 */
#define PAGE_KERNEL		__pgprot(_K_PAGE_PERMS | _PAGE_CACHEABLE)

/* ioremap */
#define PAGE_KERNEL_NO_CACHE	__pgprot(_K_PAGE_PERMS)

/* Masks for actual TLB "PD"s */
#define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_HW_SZ)
#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK_PHYS | _PAGE_CACHEABLE)

/**************************************************************************
 * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
 *
 * Certain cases have 1:1 mapping
 * e.g. __P101 means VM_READ, VM_EXEC and !VM_SHARED
 * which directly corresponds to PAGE_U_X_R
 *
 * Other rules which cause the divergence from 1:1 mapping:
 *
 *  1. Although ARC700 can do exclusive execute/write protection (meaning R
 *     can be tracked independently of X/W, unlike some other CPUs), still to
 *     keep things consistent with other archs:
 *      -Write implies Read:   W => R
 *      -Execute implies Read: X => R
 *
 *  2. Pvt Writable doesn't have Write Enabled initially: Pvt-W => !W
 *     This is to enable the COW mechanism
 */
	/* xwr */
#define __P000	PAGE_U_NONE
#define __P001	PAGE_U_R
#define __P010	PAGE_U_R	/* Pvt-W => !W */
#define __P011	PAGE_U_R	/* Pvt-W => !W */
#define __P100	PAGE_U_X_R	/* X => R */
#define __P101	PAGE_U_X_R
#define __P110	PAGE_U_X_R	/* Pvt-W => !W and X => R */
#define __P111	PAGE_U_X_R	/* Pvt-W => !W */

#define __S000	PAGE_U_NONE
#define __S001	PAGE_U_R
#define __S010	PAGE_U_W_R	/* W => R */
#define __S011	PAGE_U_W_R
#define __S100	PAGE_U_X_R	/* X => R */
#define __S101	PAGE_U_X_R
#define __S110	PAGE_U_X_W_R	/* X => R */
#define __S111	PAGE_U_X_W_R
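
/*
 * Worked example (not from the original source): a typical private
 * read-write mapping, e.g. mmap(..., PROT_READ|PROT_WRITE, MAP_PRIVATE, ...),
 * indexes __P011 above and thus starts life as PAGE_U_R, i.e. with
 * _PAGE_WRITE clear. The first store then faults, and the generic COW
 * path upgrades the PTE to a writable one via pte_mkwrite().
 */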

/****************************************************************
 * 2 tier (PGD:PTE) software page walker
 *
 * [31]            32 bit virtual address                   [0]
 * -------------------------------------------------------
 * |              | <------------ PGDIR_SHIFT ----------> |
 * |              |                                       |
 * | BITS_FOR_PGD |  BITS_FOR_PTE  | <-- PAGE_SHIFT --> |
 * -------------------------------------------------------
 *       |                 |                |
 *       |                 |                --> off in page frame
 *       |                 ---> index into Page Table
 *       ----> index into Page Directory
 *
 * In a single page size configuration, only PAGE_SHIFT is fixed
 * So both PGD and PTE sizing can be tweaked
 *  e.g. 8K page (PAGE_SHIFT 13) can have
 *  - PGDIR_SHIFT 21 -> 11:8:13 address split
 *  - PGDIR_SHIFT 24 -> 8:11:13 address split
 *
 * If Super Page is configured, PGDIR_SHIFT becomes fixed too,
 * so the sizing flexibility is gone.
 */

#if defined(CONFIG_ARC_HUGEPAGE_16M)
#define PGDIR_SHIFT	24
#elif defined(CONFIG_ARC_HUGEPAGE_2M)
#define PGDIR_SHIFT	21
#else
/*
 * Only Normal page support so "hackable" (see comment above)
 * Default value provides 11:8:13 (8K), 11:9:12 (4K)
 */
#define PGDIR_SHIFT	21
#endif

#define BITS_FOR_PTE	(PGDIR_SHIFT - PAGE_SHIFT)
#define BITS_FOR_PGD	(32 - PGDIR_SHIFT)

#define PGDIR_SIZE	BIT(PGDIR_SHIFT)	/* vaddr span, not PGD sz */
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

#define PTRS_PER_PTE	BIT(BITS_FOR_PTE)
#define PTRS_PER_PGD	BIT(BITS_FOR_PGD)
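
/*
 * Worked arithmetic (not from the original source) for the default
 * PGDIR_SHIFT of 21 with an 8K page (PAGE_SHIFT 13):
 *	BITS_FOR_PTE = 21 - 13 = 8	-> PTRS_PER_PTE = 256
 *	BITS_FOR_PGD = 32 - 21 = 11	-> PTRS_PER_PGD = 2048
 *	PGDIR_SIZE   = 1 << 21 = 2M of vaddr covered per PGD entry
 * i.e. the 11:8:13 split mentioned above, giving the 1K (256 x 4 byte)
 * page tables referenced in the changelog at the top of this file.
 */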

/*
 * Number of PGD entries usable by a userland program.
 * TASK_SIZE is the maximum vaddr that can be used by a userland program.
 */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/*
 * No special requirements for the lowest virtual address we permit any user
 * space mapping to be mapped at.
 */
#define FIRST_USER_ADDRESS	0UL

/****************************************************************
 * Bucket load of VM Helpers
 */

#ifndef __ASSEMBLY__

#define pte_ERROR(e) \
	pr_crit("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
#define pgd_ERROR(e) \
	pr_crit("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

/* the zero page used for uninitialized and anonymous pages */
extern char empty_zero_page[PAGE_SIZE];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#define set_pte(pteptr, pteval)	((*(pteptr)) = (pteval))
#define set_pmd(pmdptr, pmdval)	(*(pmdptr) = pmdval)

/* find the page descriptor of the Page Tbl ref by PMD entry */
#define pmd_page(pmd)		virt_to_page(pmd_val(pmd) & PAGE_MASK)

/* find the logical addr (phy for ARC) of the Page Tbl ref by PMD entry */
#define pmd_page_vaddr(pmd)	(pmd_val(pmd) & PAGE_MASK)

/* In a 2 level sys, setup the PGD entry with PTE value */
static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	pmd_val(*pmdp) = (unsigned long)ptep;
}

#define pte_none(x)		(!pte_val(x))
#define pte_present(x)		(pte_val(x) & _PAGE_PRESENT)
#define pte_clear(mm, addr, ptep) set_pte_at(mm, addr, ptep, __pte(0))

#define pmd_none(x)		(!pmd_val(x))
#define pmd_bad(x)		((pmd_val(x) & ~PAGE_MASK))
#define pmd_present(x)		(pmd_val(x))
#define pmd_leaf(x)		(pmd_val(x) & _PAGE_HW_SZ)
#define pmd_clear(xp)		do { pmd_val(*(xp)) = 0; } while (0)

#define pte_page(pte)		pfn_to_page(pte_pfn(pte))
#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
#define pfn_pte(pfn, prot)	__pte(__pfn_to_phys(pfn) | pgprot_val(prot))

/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40 */
#define pte_pfn(pte)		(pte_val(pte) >> PAGE_SHIFT)

/* Zoo of pte_xxx functions */
#define pte_read(pte)		(pte_val(pte) & _PAGE_READ)
#define pte_write(pte)		(pte_val(pte) & _PAGE_WRITE)
#define pte_dirty(pte)		(pte_val(pte) & _PAGE_DIRTY)
#define pte_young(pte)		(pte_val(pte) & _PAGE_ACCESSED)
#define pte_special(pte)	(pte_val(pte) & _PAGE_SPECIAL)

#define PTE_BIT_FUNC(fn, op) \
	static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; }

PTE_BIT_FUNC(mknotpresent,	&= ~(_PAGE_PRESENT));
PTE_BIT_FUNC(wrprotect,		&= ~(_PAGE_WRITE));
PTE_BIT_FUNC(mkwrite,		|= (_PAGE_WRITE));
PTE_BIT_FUNC(mkclean,		&= ~(_PAGE_DIRTY));
PTE_BIT_FUNC(mkdirty,		|= (_PAGE_DIRTY));
PTE_BIT_FUNC(mkold,		&= ~(_PAGE_ACCESSED));
PTE_BIT_FUNC(mkyoung,		|= (_PAGE_ACCESSED));
PTE_BIT_FUNC(exprotect,		&= ~(_PAGE_EXECUTE));
PTE_BIT_FUNC(mkexec,		|= (_PAGE_EXECUTE));
PTE_BIT_FUNC(mkspecial,		|= (_PAGE_SPECIAL));
PTE_BIT_FUNC(mkhuge,		|= (_PAGE_HW_SZ));
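
/*
 * Illustration (not from the original source): one invocation of the
 * PTE_BIT_FUNC macro above, e.g. PTE_BIT_FUNC(wrprotect, &= ~(_PAGE_WRITE)),
 * expands to:
 *
 *	static inline pte_t pte_wrprotect(pte_t pte)
 *	{
 *		pte_val(pte) &= ~(_PAGE_WRITE);
 *		return pte;
 *	}
 *
 * i.e. each helper takes a pte by value, flips one flag and returns the
 * modified copy; callers must write it back, e.g. with set_pte_at().
 */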

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
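
/*
 * Usage sketch (not from the original source): mprotect()-style callers
 * swap in new permissions with pte_modify(), while _PAGE_CHG_MASK keeps
 * the pfn plus the ACCESSED/DIRTY/SPECIAL software state intact, e.g.:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(new_vm_flags));
 */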

/* Macro to mark a page protection as uncacheable */
#define pgprot_noncached(prot)	(__pgprot(pgprot_val(prot) & ~_PAGE_CACHEABLE))

static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pteval)
{
	set_pte(ptep, pteval);
}

/*
 * Macro to quickly access the PGD entry, utilising the fact that some
 * arch may cache the pointer to Page Directory of "current" task
 * in a MMU register
 *
 * Thus task->mm->pgd (3 pointer dereferences, cache misses etc) simply
 * becomes "read a register"
 *
 * ********CAUTION*******:
 * Kernel code might be dealing with some mm_struct of NON "current"
 * Thus use this macro only when you are certain that "current" is current
 * e.g. when dealing with signal frame setup code etc
 */
#ifdef ARC_USE_SCRATCH_REG
#define pgd_offset_fast(mm, addr)				\
({								\
	pgd_t *pgd_base = (pgd_t *) read_aux_reg(ARC_REG_SCRATCH_DATA0);\
	pgd_base + pgd_index(addr);				\
})
#else
#define pgd_offset_fast(mm, addr)	pgd_offset(mm, addr)
#endif
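
/*
 * Usage sketch (not from the original source): a fault path running in
 * the context of the faulting task satisfies the caution above, so the
 * fast variant is safe there:
 *
 *	pgd_t *pgd = pgd_offset_fast(current->active_mm, fault_addr);
 */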

extern pgd_t swapper_pg_dir[] __aligned(PAGE_SIZE);
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep);

/* Encode swap {type,off} tuple into PTE
 * We reserve 13 bits for 5-bit @type, keeping bits 12-5 zero, ensuring that
 * PAGE_PRESENT is zero in a PTE holding swap "identifier"
 */
#define __swp_entry(type, off)	((swp_entry_t) { \
					((type) & 0x1f) | ((off) << 13) })

/* Decode a PTE containing swap "identifier" into constituents */
#define __swp_type(pte_lookalike)	(((pte_lookalike).val) & 0x1f)
#define __swp_offset(pte_lookalike)	((pte_lookalike).val >> 13)
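
/*
 * Bit layout sketch (not from the original source) of a swap PTE as
 * encoded above:
 *
 *	[31..............13] [12........5] [4...0]
 *	     swap offset       all zero     type
 *
 * Keeping bits 12-5 zero guarantees _PAGE_PRESENT (bit 9 or 10 depending
 * on MMU version) is clear, so the VM never mistakes a swap identifier
 * for a valid translation.
 */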

/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#define kern_addr_valid(addr)	(1)

/*
 * remap a physical page `pfn' of size `size' with page protection `prot'
 * into virtual address `from'
 */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#include <asm/hugepage.h>
#endif

/* to cope with aliasing VIPT cache */
#define HAVE_ARCH_UNMAPPED_AREA

#endif /* __ASSEMBLY__ */

#endif