// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/sun3/sun3dvma.c
 *
 * Copyright (C) 2000 Sam Creasey
 *
 * Contains common routines for sun3/sun3x DVMA management.
 */
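
/*
 * Free DVMA space is tracked as a list of "holes" (hole_list), each
 * describing one contiguous run of unallocated bus addresses. Spare
 * hole descriptors are kept on hole_cache and reclaimed by merging
 * adjacent holes when the cache runs dry. iommu_use[] records the
 * length of each live allocation, indexed by DVMA page.
 */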

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/page.h>
#include <asm/dvma.h>

#undef DVMA_DEBUG

#ifdef CONFIG_SUN3X
extern void dvma_unmap_iommu(unsigned long baddr, int len);
#else
static inline void dvma_unmap_iommu(unsigned long a, int b)
{
}
#endif

#ifdef CONFIG_SUN3
extern void sun3_dvma_init(void);
#endif

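/*
 * One entry per DVMA page: dvma_entry_use(baddr) holds the length of
 * the allocation starting at bus address baddr, or 0 if that page is
 * not the start of a live allocation.
 */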
static unsigned long *iommu_use;

#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)

#define dvma_entry_use(baddr) (iommu_use[dvma_index(baddr)])

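/* A contiguous run of free DVMA space; size == end - start. */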
struct hole {
	unsigned long start;
	unsigned long end;
	unsigned long size;
	struct list_head list;
};

static struct list_head hole_list;
static struct list_head hole_cache;
static struct hole initholes[64];

#ifdef DVMA_DEBUG

static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;

static void print_use(void)
{
	int i;
	int j = 0;

	pr_info("dvma entry usage:\n");

	for (i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
		if (!iommu_use[i])
			continue;

		j++;

		pr_info("dvma entry: %08x len %08lx\n",
			(i << DVMA_PAGE_SHIFT) + DVMA_START, iommu_use[i]);
	}

	pr_info("%d entries in use total\n", j);

	pr_info("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
	pr_info("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
		dvma_free_bytes);
}

static void print_holes(struct list_head *holes)
{
	struct list_head *cur;
	struct hole *hole;

	pr_info("listing dvma holes\n");
	list_for_each(cur, holes) {
		hole = list_entry(cur, struct hole, list);

		if ((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
			continue;

		pr_info("hole: start %08lx end %08lx size %08lx\n",
			hole->start, hole->end, hole->size);
	}

	pr_info("end of hole listing...\n");
}
#endif /* DVMA_DEBUG */

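/*
 * Walk hole_list and merge each hole that abuts the previous one,
 * returning the now-redundant descriptors to hole_cache. Returns the
 * number of descriptors reclaimed.
 */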
static inline int refill(void)
{
	struct hole *hole;
	struct hole *prev = NULL;
	struct list_head *cur;
	int ret = 0;

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if (!prev) {
			prev = hole;
			continue;
		}

		if (hole->end == prev->start) {
			hole->size += prev->size;
			hole->end = prev->end;
			list_move(&(prev->list), &hole_cache);
			ret++;
		}
	}

	return ret;
}

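/*
 * Grab a spare hole descriptor from hole_cache, trying to refill the
 * cache from hole_list if it is empty. BUGs if nothing can be
 * reclaimed.
 */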
static inline struct hole *rmcache(void)
{
	struct hole *ret;

	if (list_empty(&hole_cache)) {
		if (!refill()) {
			pr_crit("out of dvma hole cache!\n");
			BUG();
		}
	}

	ret = list_entry(hole_cache.next, struct hole, list);
	list_del(&(ret->list));

	return ret;
}

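/*
 * Carve len bytes out of the tail of the first hole that can hold
 * them. For alignments above one DVMA page, len is padded so that the
 * resulting base address is aligned. Records the allocation length in
 * iommu_use[] and returns the bus address.
 */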
static inline unsigned long get_baddr(int len, unsigned long align)
{
	struct list_head *cur;
	struct hole *hole;

	if (list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
		pr_crit("out of dvma holes! (printing hole cache)\n");
		print_holes(&hole_cache);
		print_use();
#endif
		BUG();
	}

	list_for_each(cur, &hole_list) {
		unsigned long newlen;

		hole = list_entry(cur, struct hole, list);

		if (align > DVMA_PAGE_SIZE)
			newlen = len + ((hole->end - len) & (align - 1));
		else
			newlen = len;

		if (hole->size > newlen) {
			hole->end -= newlen;
			hole->size -= newlen;
			dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->end;
		} else if (hole->size == newlen) {
			list_move(&(hole->list), &hole_cache);
			dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->start;
		}
	}

	pr_crit("unable to find dvma hole!\n");
	BUG();
	return 0;
}

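/*
 * Release the allocation at bus address baddr: clear its iommu_use[]
 * entry, drop the IOMMU mapping, then grow an adjacent hole or, if
 * none borders it, insert a fresh hole descriptor.
 */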
static inline int free_baddr(unsigned long baddr)
{
	unsigned long len;
	struct hole *hole;
	struct list_head *cur;

	len = dvma_entry_use(baddr);
	dvma_entry_use(baddr) = 0;
	baddr &= DVMA_PAGE_MASK;
	dvma_unmap_iommu(baddr, len);

#ifdef DVMA_DEBUG
	dvma_frees++;
	dvma_free_bytes += len;
#endif

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if (hole->end == baddr) {
			hole->end += len;
			hole->size += len;
			return 0;
		} else if (hole->start == (baddr + len)) {
			hole->start = baddr;
			hole->size += len;
			return 0;
		}
	}

	hole = rmcache();

	hole->start = baddr;
	hole->end = baddr + len;
	hole->size = len;

	// list_add_tail(&(hole->list), cur);
	list_add(&(hole->list), cur);

	return 0;
}

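/*
 * Seed hole_cache with the static descriptors, publish one hole
 * covering DVMA_START..DVMA_END, and allocate the iommu_use[]
 * tracking array from memblock.
 */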
void __init dvma_init(void)
{
	struct hole *hole;
	int i;

	INIT_LIST_HEAD(&hole_list);
	INIT_LIST_HEAD(&hole_cache);

	/* prepare the hole cache */
	for (i = 0; i < ARRAY_SIZE(initholes); i++)
		list_add(&(initholes[i].list), &hole_cache);

	hole = rmcache();
	hole->start = DVMA_START;
	hole->end = DVMA_END;
	hole->size = DVMA_SIZE;

	list_add(&(hole->list), &hole_list);

	iommu_use = memblock_alloc(IOMMU_TOTAL_ENTRIES * sizeof(unsigned long),
				   SMP_CACHE_BYTES);
	if (!iommu_use)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      IOMMU_TOTAL_ENTRIES * sizeof(unsigned long));

	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);

#ifdef CONFIG_SUN3
	sun3_dvma_init();
#endif
}

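/*
 * Map len bytes starting at kernel address kaddr into DVMA space and
 * return the corresponding bus address. A len of 0 is treated as
 * 0x800 bytes; align is rounded up to at least one DVMA page.
 * Returns 0 if kaddr is NULL, and BUGs if the IOMMU mapping fails.
 */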
unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
{
	unsigned long baddr;
	unsigned long off;

	if (!len)
		len = 0x800;

	if (!kaddr || !len) {
		// pr_err("error: kaddr %lx len %x\n", kaddr, len);
		// *(int *)4 = 0;
		return 0;
	}

	pr_debug("dvma_map request %08x bytes from %08lx\n", len, kaddr);
	off = kaddr & ~DVMA_PAGE_MASK;
	kaddr &= PAGE_MASK;
	len += off;
	len = ((len + (DVMA_PAGE_SIZE - 1)) & DVMA_PAGE_MASK);

	if (align == 0)
		align = DVMA_PAGE_SIZE;
	else
		align = ((align + (DVMA_PAGE_SIZE - 1)) & DVMA_PAGE_MASK);

	baddr = get_baddr(len, align);
	// pr_info("using baddr %lx\n", baddr);

	if (!dvma_map_iommu(kaddr, baddr, len))
		return baddr + off;

	pr_crit("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr,
		len);
	BUG();
	return 0;
}
EXPORT_SYMBOL(dvma_map_align);

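/*
 * Undo a dvma_map_align() mapping given its bus address. Addresses
 * that arrived through the VME window have the 0xf00000 bits clear,
 * so restore them before looking up the allocation.
 */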
void dvma_unmap(void *baddr)
{
	unsigned long addr;

	addr = (unsigned long)baddr;
	/* check if this is a vme mapping */
	if (!(addr & 0x00f00000))
		addr |= 0xf00000;

	free_baddr(addr);
}
EXPORT_SYMBOL(dvma_unmap);

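/*
 * Allocate len bytes suitable for DVMA: get free pages, map them into
 * DVMA space, then map that DVMA range back into the CPU's address
 * space. Returns the CPU-visible virtual address, or NULL on failure.
 */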
void *dvma_malloc_align(unsigned long len, unsigned long align)
{
	unsigned long kaddr;
	unsigned long baddr;
	unsigned long vaddr;

	if (!len)
		return NULL;

	pr_debug("dvma_malloc request %lx bytes\n", len);
	len = ((len + (DVMA_PAGE_SIZE - 1)) & DVMA_PAGE_MASK);

	kaddr = __get_free_pages(GFP_ATOMIC, get_order(len));
	if (!kaddr)
		return NULL;

	baddr = dvma_map_align(kaddr, len, align);
	if (!baddr) {
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	vaddr = dvma_btov(baddr);

	if (dvma_map_cpu(kaddr, vaddr, len) < 0) {
		dvma_unmap((void *)baddr);
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	pr_debug("mapped %08lx bytes %08lx kern -> %08lx bus\n", len, kaddr,
		 baddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(dvma_malloc_align);

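/*
 * Note that there is no real free path: dvma_free() is a no-op, so
 * DVMA allocations are permanent once made.
 */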
void dvma_free(void *vaddr)
{
}
EXPORT_SYMBOL(dvma_free);