// SPDX-License-Identifier: GPL-2.0
/*
 * Virtual DMA allocation
 *
 * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * 11/26/2000 -- disabled the existing code because it didn't work for
 * me in 2.4.  Replaced with a significantly more primitive version
 * similar to the sun3 code.  The old functionality was probably more
 * desirable, but....   -- Sam Creasey (sammy@oh.verio.com)
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/vmalloc.h>

#include <asm/sun3x.h>
#include <asm/dvma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/tlbflush.h>

/* IOMMU support */

#define IOMMU_ADDR_MASK            0x03ffe000
#define IOMMU_CACHE_INHIBIT        0x00000040
#define IOMMU_FULL_BLOCK           0x00000020
#define IOMMU_MODIFIED             0x00000010
#define IOMMU_USED                 0x00000008
#define IOMMU_WRITE_PROTECT        0x00000004
#define IOMMU_DT_MASK              0x00000003
#define IOMMU_DT_INVALID           0x00000000
#define IOMMU_DT_VALID             0x00000001
#define IOMMU_DT_BAD               0x00000002
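
/*
 * Illustration (not part of the original source): a worked example of
 * how an entry is encoded under the masks above.  Assuming 8K DVMA
 * pages (the lowest set bit of IOMMU_ADDR_MASK is 0x2000), a valid
 * entry for physical page 0x01234000 would be built as
 *
 *	(0x01234000 & IOMMU_ADDR_MASK) | IOMMU_DT_VALID == 0x01234001
 *
 * i.e. bits 13-25 hold the physical page frame and the low two bits
 * hold the descriptor type (invalid/valid/bad).
 */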

/* the IOMMU page table: memory-mapped hardware registers at
   SUN3X_IOMMU, one entry per DVMA page */
static volatile unsigned long *iommu_pte = (volatile unsigned long *)SUN3X_IOMMU;


#define dvma_entry_paddr(index)		(iommu_pte[index] & IOMMU_ADDR_MASK)
#define dvma_entry_vaddr(index,paddr)	(((index) << DVMA_PAGE_SHIFT) |  \
					 ((paddr) & (DVMA_PAGE_SIZE-1)))
#if 0
#define dvma_entry_set(index,addr)	(iommu_pte[index] =            \
					    ((addr) & IOMMU_ADDR_MASK) | \
				             IOMMU_DT_VALID | IOMMU_CACHE_INHIBIT)
#else
#define dvma_entry_set(index,addr)	(iommu_pte[index] =            \
					    ((addr) & IOMMU_ADDR_MASK) | \
				             IOMMU_DT_VALID)
#endif
#define dvma_entry_clr(index)		(iommu_pte[index] = IOMMU_DT_INVALID)
#define dvma_entry_hash(addr)		(((addr) >> DVMA_PAGE_SHIFT) ^ \
					 (((addr) & 0x03c00000) >>     \
						(DVMA_PAGE_SHIFT+4)))
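
/*
 * Illustration (not in the original): dvma_entry_hash() XORs bits
 * 22-25 of the address back into the page index.  Assuming
 * DVMA_PAGE_SHIFT == 13 (inferred from IOMMU_ADDR_MASK above), an
 * address of 0x02400000 hashes to
 *
 *	(0x02400000 >> 13) ^ (0x02400000 >> 17) == 0x1200 ^ 0x120
 *					         == 0x1320
 *
 * The macro appears unused in this file and is kept for reference.
 */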

#ifdef DEBUG
/* code to print out a dvma mapping for debugging purposes */
void dvma_print (unsigned long dvma_addr)
{
	unsigned long index;

	index = dvma_addr >> DVMA_PAGE_SHIFT;

	pr_info("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
		dvma_entry_paddr(index));
}
#endif


/* create a virtual mapping for a page assigned within the IOMMU
   so that the cpu can reach it easily */
inline int dvma_map_cpu(unsigned long kaddr,
			unsigned long vaddr, int len)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	unsigned long end;
	int ret = 0;

	kaddr &= PAGE_MASK;
	vaddr &= PAGE_MASK;

	end = PAGE_ALIGN(vaddr + len);

	pr_debug("dvma: mapping kern %08lx to virt %08lx\n", kaddr, vaddr);
	pgd = pgd_offset_k(vaddr);
	p4d = p4d_offset(pgd, vaddr);
	pud = pud_offset(p4d, vaddr);

	/* walk [vaddr, end), splitting the range at pgdir and pmd
	   boundaries and allocating page tables as needed */
	do {
		pmd_t *pmd;
		unsigned long end2;

		if ((pmd = pmd_alloc(&init_mm, pud, vaddr)) == NULL) {
			ret = -ENOMEM;
			goto out;
		}

		if ((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
			end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
		else
			end2 = end;

		do {
			pte_t *pte;
			unsigned long end3;

			if ((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
				ret = -ENOMEM;
				goto out;
			}

			if ((end2 & PMD_MASK) > (vaddr & PMD_MASK))
				end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
			else
				end3 = end2;

			do {
				pr_debug("mapping %08lx phys to %08lx\n",
					 __pa(kaddr), vaddr);
				set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
						     PAGE_KERNEL));
				pte++;
				kaddr += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			} while (vaddr < end3);

		} while (vaddr < end2);

	} while (vaddr < end);

	flush_tlb_all();

 out:
	return ret;
}
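
/*
 * Illustration (not in the original): the clamping above keeps each
 * pte_alloc_kernel() call within a single pmd table.  E.g. when
 * [vaddr, end) crosses an unaligned PMD boundary, the innermost loop
 * first fills ptes up to that boundary (end3), then the middle loop
 * allocates the next pte table and the walk continues from there.
 */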


inline int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
			  int len)
{
	unsigned long end, index;

	index = baddr >> DVMA_PAGE_SHIFT;
	end = ((baddr+len) >> DVMA_PAGE_SHIFT);

	/* round the entry count up if len is not DVMA-page aligned */
	if (len & ~DVMA_PAGE_MASK)
		end++;

	for (; index < end; index++) {
//		if(dvma_entry_use(index))
//			BUG();
//		pr_info("mapping pa %lx to ba %lx\n", __pa(kaddr),
//			index << DVMA_PAGE_SHIFT);

		dvma_entry_set(index, __pa(kaddr));

		iommu_pte[index] |= IOMMU_FULL_BLOCK;
//		dvma_entry_inc(index);

		kaddr += DVMA_PAGE_SIZE;
	}

#ifdef DEBUG
	for (index = (baddr >> DVMA_PAGE_SHIFT); index < end; index++)
		dvma_print(index << DVMA_PAGE_SHIFT);
#endif
	return 0;
}
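
/*
 * Illustration (not in the original): assuming 8K DVMA pages and that
 * DVMA_PAGE_MASK masks off the in-page bits (like PAGE_MASK), mapping
 * baddr = 0, len = 0x3000 gives index = 0 and end = 0x3000 >> 13 = 1;
 * since len has bits below the page size set, end is bumped to 2, so
 * two IOMMU entries are written to cover the full 0x3000 bytes.
 */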

void dvma_unmap_iommu(unsigned long baddr, int len)
{
	int index, end;

	index = baddr >> DVMA_PAGE_SHIFT;
	end = (DVMA_PAGE_ALIGN(baddr+len) >> DVMA_PAGE_SHIFT);

	for (; index < end; index++) {
		pr_debug("freeing bus mapping %08x\n",
			 index << DVMA_PAGE_SHIFT);
#if 0
		if(!dvma_entry_use(index))
			pr_info("dvma_unmap freeing unused entry %04x\n",
				index);
		else
			dvma_entry_dec(index);
#endif
		dvma_entry_clr(index);
	}
}
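
/*
 * Usage illustration (not part of the original file): a minimal,
 * hypothetical sketch of how these primitives pair up, modeled on the
 * common sun3 DVMA allocator.  The names example_dvma_alloc and
 * dvma_btov are assumptions for illustration only.
 */
#if 0
static unsigned long example_dvma_alloc(unsigned long kaddr, int len)
{
	unsigned long baddr, vaddr;

	baddr = 0;	/* hypothetical: a free DVMA bus-address slot */

	/* point the IOMMU entries for baddr at the kernel buffer */
	if (dvma_map_iommu(kaddr, baddr, len))
		return 0;

	/* mirror the mapping so the CPU can reach the DVMA range */
	vaddr = dvma_btov(baddr);
	if (dvma_map_cpu(kaddr, vaddr, len) < 0) {
		dvma_unmap_iommu(baddr, len);
		return 0;
	}
	return baddr;
}
#endif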