// SPDX-License-Identifier: GPL-2.0
/*
 * iommu.c:  IOMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev     (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/io.h>
#include <asm/mxcc.h>
#include <asm/mbus.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/bitext.h>
#include <asm/iommu.h>
#include <asm/dma.h>

#include "mm_32.h"

/*
 * This can be sized dynamically, but we will do so only
 * once we have guidance on actual I/O pressures.
 */
#define IOMMU_RNGE	IOMMU_RNGE_256MB
#define IOMMU_START	0xF0000000
#define IOMMU_WINSIZE	(256*1024*1024U)
#define IOMMU_NPTES	(IOMMU_WINSIZE/PAGE_SIZE)	/* 64K PTEs, 256KB */
#define IOMMU_ORDER	6				/* 4096 * (1<<6) */
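/*
 * A quick sanity check of the geometry above (assuming 4 KB pages):
 * 256 MB / 4 KB = 65536 IOPTEs; at 4 bytes each that is a 256 KB page
 * table, i.e. 64 pages, matching the order-6 allocation made in
 * sbus_iommu_init() below.
 */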

static int viking_flush;
/* viking.S */
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);

/*
 * Values precomputed according to CPU type.
 */
static unsigned int ioperm_noc;		/* Consistent mapping iopte flags */
static pgprot_t dvma_prot;		/* Consistent mapping pte flags */

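/*
 * Note on MKIOPTE(): with 4 KB pages, pfn << 8 equals the physical
 * address >> 4, which is the 16-byte granularity the IOMMU uses for
 * page addresses (the base register below is likewise written as
 * __pa(table) >> 4).  The IOPTE_WAZ ("write as zeros") bits are
 * explicitly cleared.
 */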
#define IOPERM        (IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID)
#define MKIOPTE(pfn, perm) (((((pfn)<<8) & IOPTE_PAGE) | (perm)) & ~IOPTE_WAZ)

static const struct dma_map_ops sbus_iommu_dma_gflush_ops;
static const struct dma_map_ops sbus_iommu_dma_pflush_ops;

static void __init sbus_iommu_init(struct platform_device *op)
{
	struct iommu_struct *iommu;
	unsigned int impl, vers;
	unsigned long *bitmap;
	unsigned long control;
	unsigned long base;
	unsigned long tmp;

	iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
	if (!iommu) {
		prom_printf("Unable to allocate iommu structure\n");
		prom_halt();
	}

	iommu->regs = of_ioremap(&op->resource[0], 0, PAGE_SIZE * 3,
				 "iommu_regs");
	if (!iommu->regs) {
		prom_printf("Cannot map IOMMU registers\n");
		prom_halt();
	}

	control = sbus_readl(&iommu->regs->control);
	impl = (control & IOMMU_CTRL_IMPL) >> 28;
	vers = (control & IOMMU_CTRL_VERS) >> 24;
	control &= ~(IOMMU_CTRL_RNGE);
	control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
	sbus_writel(control, &iommu->regs->control);

	iommu_invalidate(iommu->regs);
	iommu->start = IOMMU_START;
	iommu->end = 0xffffffff;

	/*
	 * Allocate the IOMMU page table.  The alignment constraints are
	 * a headache: we need a 256K, 512K, 1M or 2M area aligned to its
	 * own size, and the page allocator fortunately gives us exactly
	 * that.
	 */
	tmp = __get_free_pages(GFP_KERNEL, IOMMU_ORDER);
	if (!tmp) {
		prom_printf("Unable to allocate iommu table [0x%lx]\n",
			    IOMMU_NPTES * sizeof(iopte_t));
		prom_halt();
	}
	iommu->page_table = (iopte_t *)tmp;

	/* Initialize new table. */
	memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
	flush_cache_all();
	flush_tlb_all();

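	/*
	 * The base register takes the table's physical address >> 4,
	 * matching the 16-byte granularity of the IOPTE page field
	 * (see the note at MKIOPTE() above).
	 */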
	base = __pa((unsigned long)iommu->page_table) >> 4;
	sbus_writel(base, &iommu->regs->base);
	iommu_invalidate(iommu->regs);

	bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
	if (!bitmap) {
		prom_printf("Unable to allocate iommu bitmap [%d]\n",
			    (int)(IOMMU_NPTES>>3));
		prom_halt();
	}
	bit_map_init(&iommu->usemap, bitmap, IOMMU_NPTES);
	/* To be coherent on HyperSparc, the page color of DVMA
	 * and physical addresses must match.
	 */
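	/*
	 * Illustrative numbers only (not from this file): a 256 KB
	 * virtually indexed cache with 4 KB pages would give
	 * 256K / 4K = 64 page colors.
	 */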
	if (srmmu_modtype == HyperSparc)
		iommu->usemap.num_colors = vac_cache_size >> PAGE_SHIFT;
	else
		iommu->usemap.num_colors = 1;

	printk(KERN_INFO "IOMMU: impl %d vers %d table 0x%p[%d B] map [%d b]\n",
	       impl, vers, iommu->page_table,
	       (int)(IOMMU_NPTES*sizeof(iopte_t)), (int)IOMMU_NPTES);

	op->dev.archdata.iommu = iommu;

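	/*
	 * Pick the op set matching the CPU's DMA flush strategy: the
	 * "gflush" ops issue one global flush_page_for_dma(0) per mapping
	 * call, while the "pflush" ops flush each page of the buffer
	 * individually (see __sbus_iommu_map_page()).
	 */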
	if (flush_page_for_dma_global)
		op->dev.dma_ops = &sbus_iommu_dma_gflush_ops;
	else
		op->dev.dma_ops = &sbus_iommu_dma_pflush_ops;
}

static int __init iommu_init(void)
{
	struct device_node *dp;

	for_each_node_by_name(dp, "iommu") {
		struct platform_device *op = of_find_device_by_node(dp);

		sbus_iommu_init(op);
		of_propagate_archdata(op);
	}

	return 0;
}

subsys_initcall(iommu_init);

/*
 * Flush the iotlb entries to ram.  This could be better if we
 * didn't have to flush whole pages.
 */
static void iommu_flush_iotlb(iopte_t *iopte, unsigned int niopte)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)iopte;
	end = PAGE_ALIGN(start + niopte*sizeof(iopte_t));
	start &= PAGE_MASK;
	if (viking_mxcc_present) {
		while (start < end) {
			viking_mxcc_flush_page(start);
			start += PAGE_SIZE;
		}
	} else if (viking_flush) {
		while (start < end) {
			viking_flush_page(start);
			start += PAGE_SIZE;
		}
	} else {
		while (start < end) {
			__flush_page_to_ram(start);
			start += PAGE_SIZE;
		}
	}
}

static dma_addr_t __sbus_iommu_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len, bool per_page_flush)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	phys_addr_t paddr = page_to_phys(page) + offset;
	unsigned long off = paddr & ~PAGE_MASK;
	unsigned long npages = (off + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long pfn = __phys_to_pfn(paddr);
	unsigned int busa, busa0;
	iopte_t *iopte, *iopte0;
	int ioptex, i;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;

	/*
	 * We expect unmapped highmem pages to be not in the cache.
	 * XXX Is this a good assumption?
	 * XXX What if someone else unmaps it here and races us?
	 */
	if (per_page_flush && !PageHighMem(page)) {
		unsigned long vaddr, p;

		vaddr = (unsigned long)page_address(page) + offset;
		for (p = vaddr & PAGE_MASK; p < vaddr + len; p += PAGE_SIZE)
			flush_page_for_dma(p);
	}

	/* page color = pfn of page */
	ioptex = bit_map_string_get(&iommu->usemap, npages, pfn);
	if (ioptex < 0)
		panic("iommu out");
	busa0 = iommu->start + (ioptex << PAGE_SHIFT);
	iopte0 = &iommu->page_table[ioptex];

	busa = busa0;
	iopte = iopte0;
	for (i = 0; i < npages; i++) {
		iopte_val(*iopte) = MKIOPTE(pfn, IOPERM);
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
		iopte++;
		pfn++;
	}

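	/*
	 * Push the freshly written IOPTEs out of the CPU cache so the
	 * IOMMU sees them, then hand back the bus address: window start
	 * plus slot offset plus the intra-page offset.
	 */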
	iommu_flush_iotlb(iopte0, npages);
	return busa0 + off;
}

static dma_addr_t sbus_iommu_map_page_gflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_page(dev, page, offset, len, false);
}

static dma_addr_t sbus_iommu_map_page_pflush(struct device *dev,
		struct page *page, unsigned long offset, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_page(dev, page, offset, len, true);
}
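
/*
 * Illustrative only (not part of the original file): an SBus driver
 * reaches these ops through the generic DMA API, e.g.
 *
 *	dma_addr_t ba = dma_map_single(&op->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&op->dev, ba))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(&op->dev, ba, len, DMA_TO_DEVICE);
 *
 * which ends up in sbus_iommu_map_page_{g,p}flush() above and
 * sbus_iommu_unmap_page() below.
 */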

static int __sbus_iommu_map_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs,
		bool per_page_flush)
{
	struct scatterlist *sg;
	int j;

	for_each_sg(sgl, sg, nents, j) {
		sg->dma_address = __sbus_iommu_map_page(dev, sg_page(sg),
				sg->offset, sg->length, per_page_flush);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			return 0;
		sg->dma_length = sg->length;
	}

	return nents;
}

static int sbus_iommu_map_sg_gflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	flush_page_for_dma(0);
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, false);
}

static int sbus_iommu_map_sg_pflush(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return __sbus_iommu_map_sg(dev, sgl, nents, dir, attrs, true);
}

static void sbus_iommu_unmap_page(struct device *dev, dma_addr_t dma_addr,
		size_t len, enum dma_data_direction dir, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned int busa = dma_addr & PAGE_MASK;
	unsigned long off = dma_addr & ~PAGE_MASK;
	unsigned int npages = (off + len + PAGE_SIZE-1) >> PAGE_SHIFT;
	unsigned int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned int i;

	BUG_ON(busa < iommu->start);
	for (i = 0; i < npages; i++) {
		iopte_val(iommu->page_table[ioptex + i]) = 0;
		iommu_invalidate_page(iommu->regs, busa);
		busa += PAGE_SIZE;
	}
	bit_map_clear(&iommu->usemap, ioptex, npages);
}

static void sbus_iommu_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sbus_iommu_unmap_page(dev, sg->dma_address, sg->length, dir,
				attrs);
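		/* 0x21 is '!': presumably a poison value to catch use after unmap. */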
		sg->dma_address = 0x21212121;
	}
}

#ifdef CONFIG_SBUS
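/*
 * Allocate a DMA-consistent buffer: grab physically contiguous pages,
 * remap them at a DVMA virtual address with dvma_prot (uncached on CPUs
 * that are not DMA coherent), and program matching IOPTEs so the device
 * sees the same memory through the IOMMU window.
 */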
static void *sbus_iommu_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	iopte_t *iopte = iommu->page_table;
	iopte_t *first;
	int ioptex;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (va == 0)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;

	BUG_ON((va & ~PAGE_MASK) != 0);
	BUG_ON((addr & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	/* page color = physical address */
	ioptex = bit_map_string_get(&iommu->usemap, len >> PAGE_SHIFT,
		addr >> PAGE_SHIFT);
	if (ioptex < 0)
		panic("iommu out");

	iopte += ioptex;
	first = iopte;
	end = addr + len;
	while (addr < end) {
		page = va;
		{
			pmd_t *pmdp;
			pte_t *ptep;

			if (viking_mxcc_present)
				viking_mxcc_flush_page(page);
			else if (viking_flush)
				viking_flush_page(page);
			else
				__flush_page_to_ram(page);

			pmdp = pmd_off_k(addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
		}
		iopte_val(*iopte++) =
		    MKIOPTE(page_to_pfn(virt_to_page(page)), ioperm_noc);
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* P3: why do we need this?
	 *
	 * DAVEM: Because there are several aspects, none of which
	 *        are handled by a single interface.  Some cpus are
	 *        completely not I/O DMA coherent, and some have
	 *        virtually indexed caches.  The driver DMA flushing
	 *        methods handle the former case, but here during
	 *        IOMMU page table modifications, and usage of non-cacheable
	 *        cpu mappings of pages potentially in the cpu caches, we have
	 *        to handle the latter case as well.
	 */
	flush_cache_all();
	iommu_flush_iotlb(first, len >> PAGE_SHIFT);
	flush_tlb_all();
	iommu_invalidate(iommu->regs);

	*dma_handle = iommu->start + (ioptex << PAGE_SHIFT);
	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

static void sbus_iommu_free(struct device *dev, size_t len, void *cpu_addr,
			       dma_addr_t busa, unsigned long attrs)
{
	struct iommu_struct *iommu = dev->archdata.iommu;
	iopte_t *iopte = iommu->page_table;
	struct page *page = virt_to_page(cpu_addr);
	int ioptex = (busa - iommu->start) >> PAGE_SHIFT;
	unsigned long end;

	if (!sparc_dma_free_resource(cpu_addr, len))
		return;

	BUG_ON((busa & ~PAGE_MASK) != 0);
	BUG_ON((len & ~PAGE_MASK) != 0);

	iopte += ioptex;
	end = busa + len;
	while (busa < end) {
		iopte_val(*iopte++) = 0;
		busa += PAGE_SIZE;
	}
	flush_tlb_all();
	iommu_invalidate(iommu->regs);
	bit_map_clear(&iommu->usemap, ioptex, len >> PAGE_SHIFT);

	__free_pages(page, get_order(len));
}
#endif

static const struct dma_map_ops sbus_iommu_dma_gflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_gflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_gflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};

static const struct dma_map_ops sbus_iommu_dma_pflush_ops = {
#ifdef CONFIG_SBUS
	.alloc			= sbus_iommu_alloc,
	.free			= sbus_iommu_free,
#endif
	.map_page		= sbus_iommu_map_page_pflush,
	.unmap_page		= sbus_iommu_unmap_page,
	.map_sg			= sbus_iommu_map_sg_pflush,
	.unmap_sg		= sbus_iommu_unmap_sg,
};
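
/*
 * Consistent-mapping attributes by CPU type: on Viking/MXCC and
 * HyperSparc the consistent mappings are left cacheable
 * (SRMMU_CACHE / IOPTE_CACHE); on everything else both the CPU pte
 * and the IOPTE are made uncached.
 */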
void __init ld_mmu_iommu(void)
{
	if (viking_mxcc_present || srmmu_modtype == HyperSparc) {
		dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_CACHE | IOPTE_WRITE | IOPTE_VALID;
	} else {
		dvma_prot = __pgprot(SRMMU_ET_PTE | SRMMU_PRIV);
		ioperm_noc = IOPTE_WRITE | IOPTE_VALID;
	}
}
455