xref: /OK3568_Linux_fs/kernel/arch/m68k/include/asm/pgtable_mm.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_PGTABLE_H
#define _M68K_PGTABLE_H


#if defined(CONFIG_SUN3) || defined(CONFIG_COLDFIRE)
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
#endif

#include <asm/setup.h>

#ifndef __ASSEMBLY__
#include <asm/processor.h>
#include <linux/sched.h>
#include <linux/threads.h>

/*
 * This file contains the functions and defines necessary to modify and use
 * the m68k page table tree.
 */

#include <asm/virtconvert.h>

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
#define set_pte(pteptr, pteval)					\
	do {							\
		*(pteptr) = (pteval);				\
	} while (0)
#define set_pte_at(mm, addr, ptep, pteval)	set_pte(ptep, pteval)
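
/*
 * Note (descriptive, not from the original source): the mm and addr
 * arguments of set_pte_at() are unused here; on m68k the store itself is
 * all that is needed, since no external MMU state has to be refreshed
 * (see the empty update_mmu_cache() below).
 */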

/* PMD_SHIFT determines the size of the area a second-level page table can map */
#if CONFIG_PGTABLE_LEVELS == 3
#define PMD_SHIFT	18
#endif
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
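
/*
 * Worked example (illustrative): with the three-level layout above,
 * PMD_SIZE = 1UL << 18 = 256 KiB.  Assuming the usual 4 KiB pages on the
 * Motorola MMUs, that matches PTRS_PER_PTE (64) * PAGE_SIZE (4 KiB),
 * i.e. the span covered by one full page table.
 */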

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#ifdef CONFIG_SUN3
#define PGDIR_SHIFT	17
#elif defined(CONFIG_COLDFIRE)
#define PGDIR_SHIFT	22
#else
#define PGDIR_SHIFT	25
#endif
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
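
/*
 * For reference, the resulting top-level granularity is:
 * Sun3: PGDIR_SIZE = 1UL << 17 = 128 KiB per pgd entry,
 * ColdFire: 1UL << 22 = 4 MiB,
 * other Motorola MMUs: 1UL << 25 = 32 MiB.
 */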

/*
 * Entries per page directory level: the classic m68k (Motorola MMU) is
 * configured as three-level, so it has a physical PMD level; on Sun3 and
 * ColdFire the PMD level is folded.
 */
#ifdef CONFIG_SUN3
#define PTRS_PER_PTE	16
#define __PAGETABLE_PMD_FOLDED	1
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	2048
#elif defined(CONFIG_COLDFIRE)
#define PTRS_PER_PTE	512
#define __PAGETABLE_PMD_FOLDED	1
#define PTRS_PER_PMD	1
#define PTRS_PER_PGD	1024
#else
#define PTRS_PER_PTE	64
#define PTRS_PER_PMD	128
#define PTRS_PER_PGD	128
#endif
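
/*
 * Sanity check (illustrative): each configuration covers its virtual
 * address space at the top level:
 * ColdFire: 1024 pgd entries * 4 MiB = 4 GiB,
 * classic Motorola: 128 * 32 MiB = 4 GiB,
 * Sun3: 2048 * 128 KiB = 256 MiB (the smaller Sun-3 MMU virtual space).
 */
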
#define USER_PTRS_PER_PGD	(TASK_SIZE/PGDIR_SIZE)
#define FIRST_USER_ADDRESS	0UL

/* Virtual address region for use by kernel_map() */
#ifdef CONFIG_SUN3
#define KMAP_START	0x0dc00000
#define KMAP_END	0x0e000000
#elif defined(CONFIG_COLDFIRE)
#define KMAP_START	0xe0000000
#define KMAP_END	0xf0000000
#else
#define KMAP_START	0xd0000000
#define KMAP_END	0xf0000000
#endif

#ifdef CONFIG_SUN3
extern unsigned long m68k_vmalloc_end;
#define VMALLOC_START	0x0f800000
#define VMALLOC_END	m68k_vmalloc_end
#elif defined(CONFIG_COLDFIRE)
#define VMALLOC_START	0xd0000000
#define VMALLOC_END	0xe0000000
#else
/* Just any arbitrary offset to the start of the vmalloc VM area: the
 * current 8MB value just means that there will be an 8MB "hole" after the
 * physical memory until the kernel virtual memory starts.  That means that
 * any out-of-bounds memory accesses will hopefully be caught.
 * The vmalloc() routines leave a hole of 4kB between each vmalloced
 * area for the same reason. ;)
 */
#define VMALLOC_OFFSET	(8*1024*1024)
#define VMALLOC_START	(((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
#define VMALLOC_END	KMAP_START
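
/*
 * Illustrative example: with high_memory at 0x01000000 (16 MiB of RAM),
 * VMALLOC_START becomes (0x01000000 + 0x00800000) & ~0x007fffff
 * = 0x01800000, leaving the intended gap below the vmalloc area.
 */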
#endif

/* zero page used for uninitialized stuff */
extern void *empty_zero_page;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
#define ZERO_PAGE(vaddr)	(virt_to_page(empty_zero_page))

/* number of bits that fit into a memory pointer */
#define BITS_PER_PTR			(8*sizeof(unsigned long))

/* to align the pointer to a pointer address */
#define PTR_MASK			(~(sizeof(void*)-1))

/* sizeof(void*)==1<<SIZEOF_PTR_LOG2 */
/* 64-bit machines, beware!  SRB. */
#define SIZEOF_PTR_LOG2			       2

extern void kernel_set_cachemode(void *addr, unsigned long size, int cmode);

/*
 * The m68k doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.  The Sun3 does, but
 * they are updated on demand.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long address, pte_t *ptep)
{
}

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)	(1)

/* MMU-specific headers */

#ifdef CONFIG_SUN3
#include <asm/sun3_pgtable.h>
#elif defined(CONFIG_COLDFIRE)
#include <asm/mcf_pgtable.h>
#else
#include <asm/motorola_pgtable.h>
#endif

#ifndef __ASSEMBLY__
/*
 * Macro to mark a page protection value as "uncacheable".
 */
#ifdef CONFIG_COLDFIRE
# define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | CF_PAGE_NOCACHE))
#else
#ifdef SUN3_PAGE_NOCACHE
# define __SUN3_PAGE_NOCACHE	SUN3_PAGE_NOCACHE
#else
# define __SUN3_PAGE_NOCACHE	0
#endif
#define pgprot_noncached(prot)							\
	(MMU_IS_SUN3								\
	 ? (__pgprot(pgprot_val(prot) | __SUN3_PAGE_NOCACHE))			\
	 : ((MMU_IS_851 || MMU_IS_030)						\
	    ? (__pgprot(pgprot_val(prot) | _PAGE_NOCACHE030))			\
	    : (MMU_IS_040 || MMU_IS_060)					\
	    ? (__pgprot((pgprot_val(prot) & _CACHEMASK040) | _PAGE_NOCACHE_S))	\
	    : (prot)))

pgprot_t pgprot_dmacoherent(pgprot_t prot);
#define pgprot_dmacoherent(prot)	pgprot_dmacoherent(prot)

#endif /* CONFIG_COLDFIRE */
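
/*
 * Typical use (sketch, not part of this header): a driver's mmap handler
 * would usually apply pgprot_noncached() before remapping device memory:
 *
 *	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 *
 * where pfn is the (hypothetical) physical page frame being mapped.
 */
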
#endif /* !__ASSEMBLY__ */

#endif /* _M68K_PGTABLE_H */