// SPDX-License-Identifier: GPL-2.0
/*
 * io-unit.c:  IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/oplib.h>

#include "mm_32.h"

/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

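/*
 * MKIOPTE() builds an External Page Table entry: the physical address,
 * shifted down by four bits and masked to the IOUPTE_PAGE field, plus the
 * cacheable, writable and valid permission bits (IOPERM).
 */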
#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)

static const struct dma_map_ops iounit_dma_ops;

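/*
 * Set up one IO-UNIT: allocate the per-SBI iounit_struct, initialize the
 * three DVMA bitmap regions (BMAP1, BMAP2, BMAPM) and their rotors, map
 * the 16-page External Page Table (XPT), clear every entry and install
 * iounit_dma_ops for the device.
 */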
static void __init iounit_iommu_init(struct platform_device *op)
{
	struct iounit_struct *iounit;
	iopte_t __iomem *xpt;
	iopte_t __iomem *xptend;

	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit) {
		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
		prom_halt();
	}

	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
	if (!xpt) {
		prom_printf("SUN4D: Cannot map External Page Table.");
		prom_halt();
	}

	op->dev.archdata.iommu = iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);

	xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	for (; xpt < xptend; xpt++)
		sbus_writel(0, xpt);

	op->dev.dma_ops = &iounit_dma_ops;
}

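/*
 * Attach an IO-UNIT to every "sbi" node found in the device tree,
 * propagate the archdata to each SBI's children and then set up the
 * SBI interrupts.
 */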
static int __init iounit_init(void)
{
	extern void sun4d_init_sbi_irq(void);
	struct device_node *dp;

	for_each_node_by_name(dp, "sbi") {
		struct platform_device *op = of_find_device_by_node(dp);

		iounit_iommu_init(op);
		of_propagate_archdata(op);
	}

	sun4d_init_sbi_irq();

	return 0;
}

subsys_initcall(iounit_init);

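/*
 * Find npages worth of consecutive free slots in the DVMA bitmap, point
 * the corresponding XPT entries at the caller's pages and return the
 * DVMA address that now maps vaddr.
 */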
/* One has to hold iounit->lock to call this */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/*
	 * A tiny bit of magic: each hex nibble of i names one of the three
	 * DVMA bitmap regions (1 = BMAP1, 2 = BMAP2, 3 = BMAPM).  Regions
	 * are tried from the lowest nibble upwards, so one-page mappings
	 * prefer BMAP1, two-page mappings prefer BMAP2, and anything larger
	 * starts in BMAPM.
	 */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
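	/*
	 * Mark the slots used and fill in the XPT entries.  Bumping the
	 * iopte value by 0x100 per iteration advances the mapped physical
	 * address by one page: PAGE_SIZE (4K) shifted right by the four
	 * bits MKIOPTE() drops.
	 */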
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}

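/*
 * Map a single page for streaming DMA: grab XPT slots for it under the
 * iounit lock and hand back the resulting DVMA address.
 */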
static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len, enum dma_data_direction dir,
		unsigned long attrs)
{
	void *vaddr = page_address(page) + offset;
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long ret, flags;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;

	spin_lock_irqsave(&iounit->lock, flags);
	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

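/*
 * Map a scatterlist: each entry gets its own DVMA window via
 * iounit_get_area(), all under a single hold of the iounit lock.
 */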
static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	struct scatterlist *sg;
	unsigned long flags;
	int i;

	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
	spin_lock_irqsave(&iounit->lock, flags);
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
		sg->dma_length = sg->length;
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
	return nents;
}

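/*
 * Unmapping only returns the DVMA pages to the bitmap; the stale XPT
 * entries are overwritten the next time iounit_get_area() hands the
 * slots out again.
 */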
static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;

	spin_lock_irqsave(&iounit->lock, flags);
	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
	for (len += vaddr; vaddr < len; vaddr++)
		clear_bit(vaddr, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}

static void iounit_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags, vaddr, len;
	struct scatterlist *sg;
	int i;

	spin_lock_irqsave(&iounit->lock, flags);
	for_each_sg(sgl, sg, nents, i) {
		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

#ifdef CONFIG_SBUS
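/*
 * Allocate a coherent buffer: grab zeroed pages, reserve a matching range
 * in the DVMA resource, map that DVMA range onto the pages in the kernel
 * page tables and mirror the mapping in the XPT, so CPU and device address
 * the buffer identically.
 */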
static void *iounit_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	pgprot_t dvma_prot;
	iopte_t __iomem *iopte;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (!va)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;
	*dma_handle = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pmdp = pmd_off_k(addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			iopte = iounit->page_table + i;
			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();

	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

static void iounit_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	/* XXX Somebody please fill this in */
}
#endif

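/* The DMA operations installed by iounit_iommu_init(). */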
static const struct dma_map_ops iounit_dma_ops = {
#ifdef CONFIG_SBUS
	.alloc			= iounit_alloc,
	.free			= iounit_free,
#endif
	.map_page		= iounit_map_page,
	.unmap_page		= iounit_unmap_page,
	.map_sg			= iounit_map_sg,
	.unmap_sg		= iounit_unmap_sg,
};