/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

#ifndef _ASM_RISCV_PGTABLE_H
#define _ASM_RISCV_PGTABLE_H

#include <linux/mmzone.h>
#include <linux/sizes.h>

#include <asm/pgtable-bits.h>

#ifndef __ASSEMBLY__

/* Page Upper Directory not used in RISC-V */
#include <asm-generic/pgtable-nopud.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <linux/mm_types.h>

#ifdef CONFIG_MMU

#define VMALLOC_SIZE	(KERN_VIRT_SIZE >> 1)
#define VMALLOC_END	(PAGE_OFFSET - 1)
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)

#define BPF_JIT_REGION_SIZE	(SZ_128M)
#define BPF_JIT_REGION_START	(PAGE_OFFSET - BPF_JIT_REGION_SIZE)
#define BPF_JIT_REGION_END	(VMALLOC_END)

/*
 * Roughly size the vmemmap space to be large enough to fit enough
 * struct pages to map half the virtual address space. Then
 * position vmemmap directly below the VMALLOC region.
 */
#define VMEMMAP_SHIFT \
	(CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
#define VMEMMAP_SIZE	BIT(VMEMMAP_SHIFT)
#define VMEMMAP_END	(VMALLOC_START - 1)
#define VMEMMAP_START	(VMALLOC_START - VMEMMAP_SIZE)
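
/*
 * Worked example (illustrative only, not part of the original header):
 * with the common Sv39 configuration (CONFIG_VA_BITS = 39,
 * PAGE_SHIFT = 12, STRUCT_PAGE_MAX_SHIFT = 6, i.e. a struct page of
 * at most 64 bytes), the definitions above give
 *
 *	VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32
 *	VMEMMAP_SIZE  = BIT(32)        = 4 GiB
 *
 * i.e. 4 GiB of vmemmap is enough for the struct pages covering the
 * 256 GiB (half of 2^39) of mappable virtual address space.
 */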

/*
 * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if the
 * kernel is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
 */
#define vmemmap		((struct page *)VMEMMAP_START)

#define PCI_IO_SIZE	SZ_16M
#define PCI_IO_END	VMEMMAP_START
#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)

#define FIXADDR_TOP	PCI_IO_START
#ifdef CONFIG_64BIT
#define FIXADDR_SIZE	PMD_SIZE
#else
#define FIXADDR_SIZE	PGDIR_SIZE
#endif
#define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)

#endif

#ifdef CONFIG_64BIT
#include <asm/pgtable-64.h>
#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */

#ifdef CONFIG_MMU
/* Number of entries in the page global directory */
#define PTRS_PER_PGD	(PAGE_SIZE / sizeof(pgd_t))
/* Number of entries in the page table */
#define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))

/* Number of PGD entries that a user-mode program can use */
#define USER_PTRS_PER_PGD	(TASK_SIZE / PGDIR_SIZE)

/* Page protection bits */
#define _PAGE_BASE	(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)

#define PAGE_NONE		__pgprot(_PAGE_PROT_NONE)
#define PAGE_READ		__pgprot(_PAGE_BASE | _PAGE_READ)
#define PAGE_WRITE		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
#define PAGE_EXEC		__pgprot(_PAGE_BASE | _PAGE_EXEC)
#define PAGE_READ_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
#define PAGE_WRITE_EXEC		__pgprot(_PAGE_BASE | _PAGE_READ |	\
					 _PAGE_EXEC | _PAGE_WRITE)

#define PAGE_COPY		PAGE_READ
#define PAGE_COPY_EXEC		PAGE_EXEC
#define PAGE_COPY_READ_EXEC	PAGE_READ_EXEC
#define PAGE_SHARED		PAGE_WRITE
#define PAGE_SHARED_EXEC	PAGE_WRITE_EXEC

#define _PAGE_KERNEL		(_PAGE_READ \
				| _PAGE_WRITE \
				| _PAGE_PRESENT \
				| _PAGE_ACCESSED \
				| _PAGE_DIRTY)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
					 | _PAGE_EXEC)

#define PAGE_TABLE		__pgprot(_PAGE_TABLE)

/*
 * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
 * change the properties of memory regions.
 */
#define _PAGE_IOREMAP	_PAGE_KERNEL

extern pgd_t swapper_pg_dir[];

/* MAP_PRIVATE permissions: xwr (copy-on-write) */
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXEC
#define __P101	PAGE_READ_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_READ_EXEC

/* MAP_SHARED permissions: xwr */
#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXEC
#define __S101	PAGE_READ_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
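
/*
 * Illustrative note (an assumption about generic mm behaviour, not
 * text from the original header): the core mm code indexes these
 * tables with the xwr bits of an mmap()/mprotect() request.  A
 * MAP_PRIVATE | PROT_READ | PROT_WRITE mapping selects __P011, which
 * resolves to PAGE_COPY, a read-only protection; the first store then
 * faults so the handler can perform copy-on-write before the PTE is
 * finally made writable (e.g. via pte_mkwrite() below).
 */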

static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) == 0);
}

static inline int pmd_bad(pmd_t pmd)
{
	return !pmd_present(pmd);
}

#define pmd_leaf	pmd_leaf
static inline int pmd_leaf(pmd_t pmd)
{
	return pmd_present(pmd) &&
	       (pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
{
	return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

static inline unsigned long _pgd_pfn(pgd_t pgd)
{
	return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
}

static inline struct page *pmd_page(pmd_t pmd)
{
	return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
}

/* Yields the page frame number (PFN) of a page table entry */
static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) >> _PAGE_PFN_SHIFT);
}

#define pte_page(x)	pfn_to_page(pte_pfn(x))

/* Constructs a page table entry */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
{
	return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
}

#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)

static inline int pte_present(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) == 0);
}

static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}

static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXEC;
}

static inline int pte_huge(pte_t pte)
{
	return pte_present(pte)
		&& (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

/* static inline pte_t pte_rdprotect(pte_t pte) */

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_WRITE));
}

/* static inline pte_t pte_mkread(pte_t pte) */

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_WRITE);
}

/* static inline pte_t pte_mkexec(pte_t pte) */

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte;
}

/* Modify page protection bits */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}

#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))


/* Commit new configuration to MMU hardware */
static inline void update_mmu_cache(struct vm_area_struct *vma,
	unsigned long address, pte_t *ptep)
{
	/*
	 * The kernel assumes that TLBs don't cache invalid entries, but
	 * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
	 * cache flush; it is necessary even after writing invalid entries.
	 * Relying on flush_tlb_fix_spurious_fault would suffice, but
	 * the extra traps reduce performance. So, eagerly SFENCE.VMA.
	 */
	local_flush_tlb_page(address);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	return pte_val(pte_a) == pte_val(pte_b);
}

/*
 * Certain architectures need to do special things when PTEs within
 * a page table are directly modified. Thus, the following hook is
 * made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	*ptep = pteval;
}

void flush_icache_pte(pte_t pte);

static inline void set_pte_at(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep, pte_t pteval)
{
	if (pte_present(pteval) && pte_exec(pteval))
		flush_icache_pte(pteval);

	set_pte(ptep, pteval);
}

static inline void pte_clear(struct mm_struct *mm,
	unsigned long addr, pte_t *ptep)
{
	set_pte_at(mm, addr, ptep, __pte(0));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (!pte_same(*ptep, entry))
		set_pte_at(vma->vm_mm, address, ptep, entry);
	/*
	 * update_mmu_cache will unconditionally execute, handling both
	 * the case that the PTE changed and the spurious fault case.
	 */
	return true;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pte_t *ptep)
{
	atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/*
	 * This comment is borrowed from x86, but applies equally to RISC-V:
	 *
	 * Clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * Encode and decode a swap entry
 *
 * Format of swap PTE:
 *	bit            0:	_PAGE_PRESENT (zero)
 *	bit            1:	_PAGE_PROT_NONE (zero)
 *	bits      2 to 6:	swap type
 *	bits 7 to XLEN-1:	swap offset
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		5
#define __SWP_TYPE_MASK		((1UL << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

#define MAX_SWAPFILES_CHECK()	\
	BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

#define __swp_type(x)	(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)	((x).val >> __SWP_OFFSET_SHIFT)
#define __swp_entry(type, offset) ((swp_entry_t) \
	{ ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
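
/*
 * Worked example (illustrative only): __swp_entry(3, 0x1234) packs to
 *
 *	(3 << 2) | (0x1234 << 7) = 0xc | 0x91a00 = 0x91a0c
 *
 * Bits 0 and 1 stay clear, so pte_present() is false and the MMU
 * never walks such an entry; __swp_type() and __swp_offset() recover
 * 3 and 0x1234 by reversing the shifts.
 */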

/*
 * In the RV64 Linux scheme, we give the user half of the virtual-address space
 * and give the kernel the other (upper) half.
 */
#ifdef CONFIG_64BIT
#define KERN_VIRT_START	(-(BIT(CONFIG_VA_BITS)) + TASK_SIZE)
#else
#define KERN_VIRT_START	FIXADDR_START
#endif

/*
 * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
 * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
 */
#ifdef CONFIG_64BIT
#define TASK_SIZE	(PGDIR_SIZE * PTRS_PER_PGD / 2)
#else
#define TASK_SIZE	FIXADDR_START
#endif
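
/*
 * Sanity check of the 0x4000000000 figure above (illustrative,
 * assuming Sv39: PGDIR_SIZE = 1 GiB, PTRS_PER_PGD = 4096 / 8 = 512):
 *
 *	(1UL << 30) * 512 / 2 = 1UL << 38 = 0x4000000000
 *
 * so userspace gets the lower 256 GiB, and PGDIR_SIZE divides
 * TASK_SIZE evenly, as required.
 */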

#else /* CONFIG_MMU */

#define PAGE_SHARED		__pgprot(0)
#define PAGE_KERNEL		__pgprot(0)
#define swapper_pg_dir		NULL
#define TASK_SIZE		0xffffffffUL
#define VMALLOC_START		0
#define VMALLOC_END		TASK_SIZE

static inline void __kernel_map_pages(struct page *page, int numpages, int enable) {}

#endif /* !CONFIG_MMU */

#define kern_addr_valid(addr)	(1) /* FIXME */

extern void *dtb_early_va;
extern uintptr_t dtb_early_pa;
void setup_bootmem(void);
void paging_init(void);
void misc_mem_init(void);

#define FIRST_USER_ADDRESS	0

/*
 * ZERO_PAGE is a global shared page that is always zero,
 * used for zero-mapped memory areas, etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

#endif /* !__ASSEMBLY__ */

#endif /* _ASM_RISCV_PGTABLE_H */