/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _M68K_PAGE_MM_H
#define _M68K_PAGE_MM_H

#ifndef __ASSEMBLY__

#include <linux/compiler.h>
#include <asm/module.h>

/*
 * We don't need to check for alignment etc.
 */
#ifdef CPU_M68040_OR_M68060_ONLY
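/*
 * On 68040/68060, 'move16' transfers one 16-byte cache line per
 * operation, so copy_page() moves 32 bytes per pass and runs
 * PAGE_SIZE / 32 iterations of the dbra loop.
 */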
static inline void copy_page(void *to, void *from)
{
	unsigned long tmp;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %1@+,%0@+\n\t"
			     "move16 %1@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "dbra %2,1b\n\t"
			     : "=a" (to), "=a" (from), "=d" (tmp)
			     : "0" (to), "1" (from), "2" (PAGE_SIZE / 32 - 1)
			     );
}

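/*
 * clear_page() zeroes the first 16 bytes with ordinary stores and then
 * streams that already-cleared cache line forward through the page with
 * move16; the two 'subqw #8' instructions rewind the source register so
 * every iteration re-reads the zeroed first line.
 */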
static inline void clear_page(void *page)
{
	unsigned long tmp;
	unsigned long *sp = page;

	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;
	*sp++ = 0;

	__asm__ __volatile__("1:\t"
			     ".chip 68040\n\t"
			     "move16 %2@+,%0@+\n\t"
			     ".chip 68k\n\t"
			     "subqw #8,%2\n\t"
			     "subqw #8,%2\n\t"
			     "dbra %1,1b\n\t"
			     : "=a" (sp), "=d" (tmp)
			     : "a" (page), "0" (sp),
			       "1" ((PAGE_SIZE - 16) / 16 - 1));
}

#else
#define clear_page(page) memset((page), 0, PAGE_SIZE)
#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
#endif

#define clear_user_page(addr, vaddr, page)	\
	do {	clear_page(addr);		\
		flush_dcache_page(page);	\
	} while (0)
#define copy_user_page(to, from, vaddr, page)	\
	do {	copy_page(to, from);		\
		flush_dcache_page(page);	\
	} while (0)

extern unsigned long m68k_memoffset;

#ifndef CONFIG_SUN3

#define WANT_PAGE_VIRTUAL

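/*
 * The offset between kernel virtual and physical addresses is only known
 * at boot time, so the '#0' immediates below are placeholders: each
 * m68k_fixup() entry records the location of the immediate (1b+2) so that
 * early startup code can patch in the real m68k_memoffset value.
 */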
static inline unsigned long ___pa(void *vaddr)
{
	unsigned long paddr;
	asm (
		"1:	addl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (paddr)
		: "0" (vaddr), "i" (m68k_fixup_memoffset));
	return paddr;
}
#define __pa(vaddr)	___pa((void *)(long)(vaddr))
static inline void *__va(unsigned long paddr)
{
	void *vaddr;
	asm (
		"1:	subl #0,%0\n"
		m68k_fixup(%c2, 1b+2)
		: "=r" (vaddr)
		: "0" (paddr), "i" (m68k_fixup_memoffset));
	return vaddr;
}
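/*
 * Note that ___pa() and __va() are exact inverses: one adds and the other
 * subtracts the same patched offset, so __va(__pa(vaddr)) yields the
 * original kernel virtual address again.
 */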

#else	/* !CONFIG_SUN3 */
/* This #define is a horrible hack to suppress lots of warnings. --m */
#define __pa(x) ___pa((unsigned long)(x))
static inline unsigned long ___pa(unsigned long x)
{
	if (x == 0)
		return 0;
	if (x >= PAGE_OFFSET)
		return (x - PAGE_OFFSET);
	else
		return (x + 0x2000000);
}

static inline void *__va(unsigned long x)
{
	if (x == 0)
		return (void *)0;

	if (x < 0x2000000)
		return (void *)(x + PAGE_OFFSET);
	else
		return (void *)(x - 0x2000000);
}
#endif	/* CONFIG_SUN3 */

/*
 * NOTE: virtual isn't really correct, actually it should be the offset into the
 * memory node, but we have no highmem, so that works for now.
 * TODO: implement (fast) pfn<->pgdat_idx conversion functions, this makes lots
 * of the shifts unnecessary.
 */
#define virt_to_pfn(kaddr)	(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_virt(pfn)	__va((pfn) << PAGE_SHIFT)

extern int m68k_virt_to_node_shift;

#ifdef CONFIG_SINGLE_MEMORY_CHUNK
#define __virt_to_node(addr)	(&pg_data_map[0])
#else
extern struct pglist_data *pg_data_table[];

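/*
 * As with ___pa()/__va(), the shift used to index pg_data_table[] is not
 * known at compile time: the 'moveq #0' below is patched at startup (via
 * the m68k_fixup_vnode_shift record) with the real value of
 * m68k_virt_to_node_shift.
 */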
static inline __attribute_const__ int __virt_to_node_shift(void)
{
	int shift;

	asm (
		"1:	moveq	#0,%0\n"
		m68k_fixup(%c1, 1b)
		: "=d" (shift)
		: "i" (m68k_fixup_vnode_shift));
	return shift;
}

#define __virt_to_node(addr)	(pg_data_table[(unsigned long)(addr) >> __virt_to_node_shift()])
#endif

#define virt_to_page(addr) ({					\
	pfn_to_page(virt_to_pfn(addr));				\
})
#define page_to_virt(page) ({					\
	pfn_to_virt(page_to_pfn(page));				\
})

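/*
 * With more than one memory chunk, every node carries its own mem_map, so
 * pfn_to_page() first locates the owning node (through the pfn's virtual
 * address) and page_to_pfn() finds it again via page_to_nid().
 */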
#define pfn_to_page(pfn) ({						\
	unsigned long __pfn = (pfn);					\
	struct pglist_data *pgdat;					\
	pgdat = __virt_to_node((unsigned long)pfn_to_virt(__pfn));	\
	pgdat->node_mem_map + (__pfn - pgdat->node_start_pfn);		\
})
#define page_to_pfn(_page) ({						\
	const struct page *__p = (_page);				\
	struct pglist_data *pgdat;					\
	pgdat = &pg_data_map[page_to_nid(__p)];				\
	((__p) - pgdat->node_mem_map) + pgdat->node_start_pfn;		\
})

#define virt_addr_valid(kaddr)	((void *)(kaddr) >= (void *)PAGE_OFFSET && (void *)(kaddr) < high_memory)
#define pfn_valid(pfn)		virt_addr_valid(pfn_to_virt(pfn))

#endif /* __ASSEMBLY__ */

#endif /* _M68K_PAGE_MM_H */