1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
3*4882a593Smuzhiyun
4*4882a593Smuzhiyun #ifndef __ASM_CSKY_PGTABLE_H
5*4882a593Smuzhiyun #define __ASM_CSKY_PGTABLE_H
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #include <asm/fixmap.h>
8*4882a593Smuzhiyun #include <asm/memory.h>
9*4882a593Smuzhiyun #include <asm/addrspace.h>
10*4882a593Smuzhiyun #include <abi/pgtable-bits.h>
11*4882a593Smuzhiyun #include <asm-generic/pgtable-nopmd.h>
12*4882a593Smuzhiyun
/* Two-level paging: each top-level (PGD) entry maps 4 MiB (2^22 bytes). */
#define PGDIR_SHIFT		22
#define PGDIR_SIZE		(1UL << PGDIR_SHIFT)
#define PGDIR_MASK		(~(PGDIR_SIZE-1))

/* User space is the low 2 GiB; the rest of the PGD covers kernel space. */
#define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL
19*4882a593Smuzhiyun
/*
 * C-SKY uses a two-level page-table structure: both the PGD and each
 * PTE table occupy exactly one page (allocation order 0).
 */
#define PGD_ORDER	0
#define PTE_ORDER	0

#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
#define PTRS_PER_PMD	1	/* PMD level is folded (asm-generic/pgtable-nopmd.h) */
#define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
29*4882a593Smuzhiyun
/* Diagnostics printed when a corrupted page-table entry is detected. */
#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, (e).pte_low)
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))

#define pmd_page(pmd)	(pfn_to_page(pmd_phys(pmd) >> PAGE_SHIFT))
/*
 * Clearing a pte: kernel-space entries (addr has the PAGE_OFFSET bit set
 * -- presumably bit 31, matching USER_PTRS_PER_PGD above; confirm against
 * asm/memory.h) keep _PAGE_GLOBAL so the TLB global marking survives;
 * user entries are cleared to zero.
 */
#define pte_clear(mm, addr, ptep)	set_pte((ptep), \
	(((unsigned int) addr & PAGE_OFFSET) ? __pte(_PAGE_GLOBAL) : __pte(0)))
#define pte_none(pte)		(!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte)	(pte_val(pte) & _PAGE_PRESENT)
#define pte_pfn(x)		((unsigned long)((x).pte_low >> PAGE_SHIFT))
#define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << PAGE_SHIFT) \
				| pgprot_val(prot))
43*4882a593Smuzhiyun
/* Bit groups combining the hardware and software r/w tracking bits. */
#define __READABLE	(_PAGE_READ | _PAGE_VALID | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_WRITE | _PAGE_DIRTY | _PAGE_MODIFIED)

/* Bits pte_modify() preserves: frame number, accessed/modified, cache attrs. */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED | \
			 _CACHE_MASK)

/*
 * Swap-entry encoding inside a non-present pte:
 *   bits 4..11  : swap type
 *   bits 12..31 : swap offset
 */
#define __swp_type(x)			(((x).val >> 4) & 0xff)
#define __swp_offset(x)			((x).val >> 12)
#define __swp_entry(type, offset)	((swp_entry_t) {((type) << 4) | \
					((offset) << 12) })
#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })

#define pte_page(x)			pfn_to_page(pte_pfn(x))
#define __mk_pte(page_nr, pgprot)	__pte(((page_nr) << PAGE_SHIFT) | \
					pgprot_val(pgprot))
60*4882a593Smuzhiyun
/*
 * C-SKY can't do page protection for execute, and considers that the same
 * like read. Also, write permissions imply read permissions. This is the
 * closest we can get by reasonable means..
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _CACHE_CACHED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				_CACHE_CACHED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_READ | _CACHE_CACHED)
/* Kernel mappings are global and pre-marked accessed/dirty (no fault needed). */
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
				_PAGE_GLOBAL | _CACHE_CACHED)
#define PAGE_USERIO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				_CACHE_CACHED)

/* ioremap() default: uncached and strongly ordered (_PAGE_SO). */
#define _PAGE_IOREMAP \
	(_PAGE_PRESENT | __READABLE | __WRITEABLE | _PAGE_GLOBAL | \
	 _CACHE_UNCACHED | _PAGE_SO)
79*4882a593Smuzhiyun
/*
 * Protection maps indexed by mmap prot bits (xwr): __Pxxx for private
 * (copy-on-write) mappings, __Sxxx for shared mappings. Write for a
 * private mapping maps to PAGE_COPY so the first write faults for COW.
 */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
97*4882a593Smuzhiyun
/* Single shared zero-filled page, mapped for anonymous read faults. */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

extern void load_pgd(unsigned long pg_dir);
/* Fallback pte table every empty pmd points at (see pmd_none/pmd_clear). */
extern pte_t invalid_pte_table[PTRS_PER_PTE];
103*4882a593Smuzhiyun
/*
 * Install a pte. On CPUs with CONFIG_CPU_NEED_TLBSYNC the cache line
 * holding the entry is written back to memory -- presumably so the
 * hardware TLB refill reads the updated entry from RAM; confirm against
 * the csky TLB code. The full barrier keeps the store ordered before
 * any subsequent access that relies on the new mapping.
 */
static inline void set_pte(pte_t *p, pte_t pte)
{
	*p = pte;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
	/* prevent out-of-order execution */
	smp_mb();
}
#define set_pte_at(mm, addr, ptep, pteval)	set_pte(ptep, pteval)
114*4882a593Smuzhiyun
pmd_page_vaddr(pmd_t pmd)115*4882a593Smuzhiyun static inline pte_t *pmd_page_vaddr(pmd_t pmd)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun unsigned long ptr;
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun ptr = pmd_val(pmd);
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun return __va(ptr);
122*4882a593Smuzhiyun }
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun #define pmd_phys(pmd) pmd_val(pmd)
125*4882a593Smuzhiyun
/*
 * Install a pmd entry; same cache-writeback and barrier discipline as
 * set_pte() above for CPUs that need the TLB to see memory directly.
 */
static inline void set_pmd(pmd_t *p, pmd_t pmd)
{
	*p = pmd;
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
	/* prevent speculative execution */
	smp_mb();
}
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun
/* An empty pmd is one that points at the shared invalid_pte_table. */
static inline int pmd_none(pmd_t pmd)
{
	return __pa(invalid_pte_table) == pmd_val(pmd);
}

/* A pmd is bad when any non-page-aligned bits are set in it. */
#define pmd_bad(pmd)	(pmd_val(pmd) & ~PAGE_MASK)
143*4882a593Smuzhiyun
/* Present means: not pointing at the shared invalid_pte_table. */
static inline int pmd_present(pmd_t pmd)
{
	return __pa(invalid_pte_table) != pmd_val(pmd);
}
148*4882a593Smuzhiyun
/*
 * Reset a pmd to point at invalid_pte_table (the "empty" state tested by
 * pmd_none()), flushing the cache line where the CPU requires it.
 */
static inline void pmd_clear(pmd_t *p)
{
	pmd_val(*p) = (__pa(invalid_pte_table));
#if defined(CONFIG_CPU_NEED_TLBSYNC)
	dcache_wb_line((u32)p);
#endif
}
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun /*
158*4882a593Smuzhiyun * The following only work if pte_present() is true.
159*4882a593Smuzhiyun * Undefined behaviour if not..
160*4882a593Smuzhiyun */
pte_read(pte_t pte)161*4882a593Smuzhiyun static inline int pte_read(pte_t pte)
162*4882a593Smuzhiyun {
163*4882a593Smuzhiyun return pte.pte_low & _PAGE_READ;
164*4882a593Smuzhiyun }
165*4882a593Smuzhiyun
/* Test the write-permission software bit. Uses the pte_val() accessor for
 * consistency with the other pte helpers in this file (was raw .pte_low). */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}
170*4882a593Smuzhiyun
/* Test the software dirty bit (_PAGE_MODIFIED). Uses the pte_val()
 * accessor for consistency with the other pte helpers (was raw .pte_low). */
static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_MODIFIED;
}
175*4882a593Smuzhiyun
/* Test the software accessed bit. Uses the pte_val() accessor for
 * consistency with the other pte helpers (was raw .pte_low). */
static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}
180*4882a593Smuzhiyun
/* Drop write permission; the hardware dirty bit goes with it. */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) = pte_val(pte) & ~(_PAGE_WRITE | _PAGE_DIRTY);
	return pte;
}
186*4882a593Smuzhiyun
/* Clear both the software (_PAGE_MODIFIED) and hardware dirty bits. */
static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY | _PAGE_MODIFIED);
	return pte;
}
192*4882a593Smuzhiyun
/* Clear the software accessed bit and the hardware valid bit. */
static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_VALID | _PAGE_ACCESSED);
	return pte;
}
198*4882a593Smuzhiyun
/*
 * Grant write permission; if the page was already modified, also set the
 * hardware dirty bit so no spurious write fault is taken.
 */
static inline pte_t pte_mkwrite(pte_t pte)
{
	unsigned long v = pte_val(pte) | _PAGE_WRITE;

	if (v & _PAGE_MODIFIED)
		v |= _PAGE_DIRTY;
	pte_val(pte) = v;
	return pte;
}
206*4882a593Smuzhiyun
/*
 * Mark the page modified; if it is writable, also set the hardware dirty
 * bit so the CPU won't fault on the next write.
 */
static inline pte_t pte_mkdirty(pte_t pte)
{
	unsigned long v = pte_val(pte) | _PAGE_MODIFIED;

	if (v & _PAGE_WRITE)
		v |= _PAGE_DIRTY;
	pte_val(pte) = v;
	return pte;
}
214*4882a593Smuzhiyun
/*
 * Mark the page accessed; if it is readable, also set the hardware valid
 * bit so the next access won't take a spurious fault.
 */
static inline pte_t pte_mkyoung(pte_t pte)
{
	unsigned long v = pte_val(pte) | _PAGE_ACCESSED;

	if (v & _PAGE_READ)
		v |= _PAGE_VALID;
	pte_val(pte) = v;
	return pte;
}
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun #define __HAVE_PHYS_MEM_ACCESS_PROT
224*4882a593Smuzhiyun struct file;
225*4882a593Smuzhiyun extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
226*4882a593Smuzhiyun unsigned long size, pgprot_t vma_prot);
227*4882a593Smuzhiyun
228*4882a593Smuzhiyun /*
229*4882a593Smuzhiyun * Macro to make mark a page protection value as "uncacheable". Note
230*4882a593Smuzhiyun * that "protection" is really a misnomer here as the protection value
231*4882a593Smuzhiyun * contains the memory attribute bits, dirty bits, and various other
232*4882a593Smuzhiyun * bits as well.
233*4882a593Smuzhiyun */
234*4882a593Smuzhiyun #define pgprot_noncached pgprot_noncached
235*4882a593Smuzhiyun
pgprot_noncached(pgprot_t _prot)236*4882a593Smuzhiyun static inline pgprot_t pgprot_noncached(pgprot_t _prot)
237*4882a593Smuzhiyun {
238*4882a593Smuzhiyun unsigned long prot = pgprot_val(_prot);
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED | _PAGE_SO;
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun return __pgprot(prot);
243*4882a593Smuzhiyun }
244*4882a593Smuzhiyun
245*4882a593Smuzhiyun #define pgprot_writecombine pgprot_writecombine
pgprot_writecombine(pgprot_t _prot)246*4882a593Smuzhiyun static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
247*4882a593Smuzhiyun {
248*4882a593Smuzhiyun unsigned long prot = pgprot_val(_prot);
249*4882a593Smuzhiyun
250*4882a593Smuzhiyun prot = (prot & ~_CACHE_MASK) | _CACHE_UNCACHED;
251*4882a593Smuzhiyun
252*4882a593Smuzhiyun return __pgprot(prot);
253*4882a593Smuzhiyun }
254*4882a593Smuzhiyun
255*4882a593Smuzhiyun /*
256*4882a593Smuzhiyun * Conversion functions: convert a page and protection to a page entry,
257*4882a593Smuzhiyun * and a page entry and page directory to the page they refer to.
258*4882a593Smuzhiyun */
259*4882a593Smuzhiyun #define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
pte_modify(pte_t pte,pgprot_t newprot)260*4882a593Smuzhiyun static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
261*4882a593Smuzhiyun {
262*4882a593Smuzhiyun return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
263*4882a593Smuzhiyun (pgprot_val(newprot)));
264*4882a593Smuzhiyun }
265*4882a593Smuzhiyun
/* Master kernel page directory and MMU bring-up entry point. */
extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);

void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *pte);

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* __ASM_CSKY_PGTABLE_H */
279