xref: /OK3568_Linux_fs/kernel/arch/s390/pci/pci_dma.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/iommu-helper.h>
#include <linux/dma-map-ops.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <asm/pci_dma.h>

static struct kmem_cache *dma_region_table_cache;
static struct kmem_cache *dma_page_table_cache;
static int s390_iommu_strict;

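/* Flush the device's entire DMA address range from the IOMMU TLB. */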
static int zpci_refresh_global(struct zpci_dev *zdev)
{
	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
				  zdev->iommu_pages * PAGE_SIZE);
}

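/* Allocate a region/segment table with all entries marked invalid. */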
unsigned long *dma_alloc_cpu_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
		*entry = ZPCI_TABLE_INVALID;
	return table;
}

static void dma_free_cpu_table(void *table)
{
	kmem_cache_free(dma_region_table_cache, table);
}

static unsigned long *dma_alloc_page_table(void)
{
	unsigned long *table, *entry;

	table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
	if (!table)
		return NULL;

	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
		*entry = ZPCI_PTE_INVALID;
	return table;
}

static void dma_free_page_table(void *table)
{
	kmem_cache_free(dma_page_table_cache, table);
}

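/*
 * Return the segment table that a region-table entry points to, allocating
 * and validating it first if the entry is still invalid.
 */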
static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
{
	unsigned long *sto;

	if (reg_entry_isvalid(*entry))
		sto = get_rt_sto(*entry);
	else {
		sto = dma_alloc_cpu_table();
		if (!sto)
			return NULL;

		set_rt_sto(entry, sto);
		validate_rt_entry(entry);
		entry_clr_protected(entry);
	}
	return sto;
}

static unsigned long *dma_get_page_table_origin(unsigned long *entry)
{
	unsigned long *pto;

	if (reg_entry_isvalid(*entry))
		pto = get_st_pto(*entry);
	else {
		pto = dma_alloc_page_table();
		if (!pto)
			return NULL;
		set_st_pto(entry, pto);
		validate_st_entry(entry);
		entry_clr_protected(entry);
	}
	return pto;
}

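/*
 * Walk the region and segment tables for @dma_addr, allocating any missing
 * intermediate tables, and return a pointer to the page-table entry.
 */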
unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
	unsigned long *sto, *pto;
	unsigned int rtx, sx, px;

	rtx = calc_rtx(dma_addr);
	sto = dma_get_seg_table_origin(&rto[rtx]);
	if (!sto)
		return NULL;

	sx = calc_sx(dma_addr);
	pto = dma_get_page_table_origin(&sto[sx]);
	if (!pto)
		return NULL;

	px = calc_px(dma_addr);
	return &pto[px];
}

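/*
 * Update a single page-table entry: either invalidate it or point it at
 * @page_addr, and set or clear the protection bit according to @flags.
 */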
void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
{
	if (flags & ZPCI_PTE_INVALID) {
		invalidate_pt_entry(entry);
	} else {
		set_pt_pfaa(entry, page_addr);
		validate_pt_entry(entry);
	}

	if (flags & ZPCI_TABLE_PROTECTED)
		entry_set_protected(entry);
	else
		entry_clr_protected(entry);
}

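/*
 * Update the translation-table entries for a contiguous range of pages.
 * If an entry cannot be walked midway through a validation, the entries
 * already updated are rolled back to the invalid state.
 */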
static int __dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			      dma_addr_t dma_addr, size_t size, int flags)
{
	unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
	unsigned long irq_flags;
	unsigned long *entry;
	int i, rc = 0;

	if (!nr_pages)
		return -EINVAL;

	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
	if (!zdev->dma_table) {
		rc = -EINVAL;
		goto out_unlock;
	}

	for (i = 0; i < nr_pages; i++) {
		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
		if (!entry) {
			rc = -ENOMEM;
			goto undo_cpu_trans;
		}
		dma_update_cpu_trans(entry, page_addr, flags);
		page_addr += PAGE_SIZE;
		dma_addr += PAGE_SIZE;
	}

undo_cpu_trans:
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
		flags = ZPCI_PTE_INVALID;
		while (i-- > 0) {
			page_addr -= PAGE_SIZE;
			dma_addr -= PAGE_SIZE;
			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
			if (!entry)
				break;
			dma_update_cpu_trans(entry, page_addr, flags);
		}
	}
out_unlock:
	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
	return rc;
}

static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
			   size_t size, int flags)
{
	unsigned long irqflags;
	int ret;

	/*
	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
	 * translations when previously invalid translation-table entries are
	 * validated. With lazy unmap, rpcit is skipped for previously valid
	 * entries, but a global rpcit is then required before any address can
	 * be re-used, i.e. after each iommu bitmap wrap-around.
	 */
	if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
		if (!zdev->tlb_refresh)
			return 0;
	} else {
		if (!s390_iommu_strict)
			return 0;
	}

	ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
				 PAGE_ALIGN(size));
	if (ret == -ENOMEM && !s390_iommu_strict) {
		/* enable the hypervisor to free some resources */
		if (zpci_refresh_global(zdev))
			goto out;

		spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
		bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
			      zdev->lazy_bitmap, zdev->iommu_pages);
		bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
		ret = 0;
	}
out:
	return ret;
}

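/*
 * Update the translation tables and then flush the IOMMU TLB as needed.
 * If the flush fails after new entries were validated, the mapping is
 * torn down again so no half-established translation is left behind.
 */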
static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
			    dma_addr_t dma_addr, size_t size, int flags)
{
	int rc;

	rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (rc)
		return rc;

	rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
		__dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);

	return rc;
}

void dma_free_seg_table(unsigned long entry)
{
	unsigned long *sto = get_rt_sto(entry);
	int sx;

	for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
		if (reg_entry_isvalid(sto[sx]))
			dma_free_page_table(get_st_pto(sto[sx]));

	dma_free_cpu_table(sto);
}

void dma_cleanup_tables(unsigned long *table)
{
	int rtx;

	if (!table)
		return;

	for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
		if (reg_entry_isvalid(table[rtx]))
			dma_free_seg_table(table[rtx]);

	dma_free_cpu_table(table);
}

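/* Find a free range of @size pages in the iommu bitmap, starting at @start. */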
static unsigned long __dma_alloc_iommu(struct device *dev,
				       unsigned long start, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
				start, size, zdev->start_dma >> PAGE_SHIFT,
				dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
				0);
}

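/*
 * Allocate a DMA address range of @size pages. On wrap-around in lazy mode,
 * a global TLB flush is issued and lazily freed addresses are returned to
 * the iommu bitmap before the search restarts at the bottom.
 */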
static dma_addr_t dma_alloc_address(struct device *dev, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long offset, flags;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
	if (offset == -1) {
		if (!s390_iommu_strict) {
			/* global flush before DMA addresses are reused */
			if (zpci_refresh_global(zdev))
				goto out_error;

			bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
				      zdev->lazy_bitmap, zdev->iommu_pages);
			bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
		}
		/* wrap-around */
		offset = __dma_alloc_iommu(dev, 0, size);
		if (offset == -1)
			goto out_error;
	}
	zdev->next_bit = offset + size;
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);

	return zdev->start_dma + offset * PAGE_SIZE;

out_error:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
	return DMA_MAPPING_ERROR;
}

static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long flags, offset;

	offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;

	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
	if (!zdev->iommu_bitmap)
		goto out;

	if (s390_iommu_strict)
		bitmap_clear(zdev->iommu_bitmap, offset, size);
	else
		bitmap_set(zdev->lazy_bitmap, offset, size);

out:
	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}

static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
{
	struct {
		unsigned long rc;
		unsigned long addr;
	} __packed data = {rc, addr};

	zpci_err_hex(&data, sizeof(data));
}

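/*
 * dma_map_ops .map_page callback: map @size bytes starting at @offset
 * within @page into the device's DMA address space. DMA_NONE and
 * DMA_TO_DEVICE mappings are write-protected against the device.
 */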
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	unsigned long nr_pages;
	dma_addr_t dma_addr;
	int ret;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	dma_addr = dma_alloc_address(dev, nr_pages);
	if (dma_addr == DMA_MAPPING_ERROR) {
		ret = -ENOSPC;
		goto out_err;
	}

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
	if (ret)
		goto out_free;

	atomic64_add(nr_pages, &zdev->mapped_pages);
	return dma_addr + (offset & ~PAGE_MASK);

out_free:
	dma_free_address(dev, dma_addr, nr_pages);
out_err:
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return DMA_MAPPING_ERROR;
}

static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	int npages, ret;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			       ZPCI_PTE_INVALID);
	if (ret) {
		zpci_err("unmap error:\n");
		zpci_err_dma(ret, dma_addr);
		return;
	}

	atomic64_add(npages, &zdev->unmapped_pages);
	dma_free_address(dev, dma_addr, npages);
}

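/*
 * dma_map_ops .alloc callback: allocate zeroed pages and map them
 * bidirectionally through the IOMMU.
 */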
static void *s390_dma_alloc(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag,
			    unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	struct page *page;
	unsigned long pa;
	dma_addr_t map;

	size = PAGE_ALIGN(size);
	page = alloc_pages(flag | __GFP_ZERO, get_order(size));
	if (!page)
		return NULL;

	pa = page_to_phys(page);
	map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, map)) {
		free_pages(pa, get_order(size));
		return NULL;
	}

	atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
	if (dma_handle)
		*dma_handle = map;
	return (void *) pa;
}

static void s390_dma_free(struct device *dev, size_t size,
			  void *pa, dma_addr_t dma_handle,
			  unsigned long attrs)
{
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));

	size = PAGE_ALIGN(size);
	atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
	s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
	free_pages((unsigned long) pa, get_order(size));
}

/* Map a segment into a contiguous dma address area */
static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			     size_t size, dma_addr_t *handle,
			     enum dma_data_direction dir)
{
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
	dma_addr_t dma_addr_base, dma_addr;
	int flags = ZPCI_PTE_VALID;
	struct scatterlist *s;
	unsigned long pa = 0;
	int ret;

	dma_addr_base = dma_alloc_address(dev, nr_pages);
	if (dma_addr_base == DMA_MAPPING_ERROR)
		return -ENOMEM;

	dma_addr = dma_addr_base;
	if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
		pa = page_to_phys(sg_page(s));
		ret = __dma_update_trans(zdev, pa, dma_addr,
					 s->offset + s->length, flags);
		if (ret)
			goto unmap;

		dma_addr += s->offset + s->length;
	}
	ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
	if (ret)
		goto unmap;

	*handle = dma_addr_base;
	atomic64_add(nr_pages, &zdev->mapped_pages);

	return ret;

unmap:
	dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
			 ZPCI_PTE_INVALID);
	dma_free_address(dev, dma_addr_base, nr_pages);
	zpci_err("map error:\n");
	zpci_err_dma(ret, pa);
	return ret;
}

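/*
 * dma_map_ops .map_sg callback: map a scatterlist, merging as many
 * consecutive elements as possible into single contiguous DMA address
 * ranges. A range is closed whenever the next element does not start at
 * offset 0, the accumulated size is not page-aligned, or adding the
 * element would exceed the device's maximum segment size.
 */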
static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
			   int nr_elements, enum dma_data_direction dir,
			   unsigned long attrs)
{
	struct scatterlist *s = sg, *start = sg, *dma = sg;
	unsigned int max = dma_get_max_seg_size(dev);
	unsigned int size = s->offset + s->length;
	unsigned int offset = s->offset;
	int count = 0, i;

	for (i = 1; i < nr_elements; i++) {
		s = sg_next(s);

		s->dma_address = DMA_MAPPING_ERROR;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) ||
		    size + s->length > max) {
			if (__s390_dma_map_sg(dev, start, size,
					      &dma->dma_address, dir))
				goto unmap;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count++;
		}
		size += s->length;
	}
	if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
		goto unmap;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count + 1;
unmap:
	for_each_sg(sg, s, count, i)
		s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
				     dir, attrs);

	return 0;
}

static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
			      int nr_elements, enum dma_data_direction dir,
			      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nr_elements, i) {
		if (s->dma_length)
			s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
					     dir, attrs);
		s->dma_address = 0;
		s->dma_length = 0;
	}
}

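/*
 * Set up the DMA translation table and iommu bitmaps for a PCI function
 * and register the translation-table origin with the hardware.
 */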
int zpci_dma_init_device(struct zpci_dev *zdev)
{
	int rc;

	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	spin_lock_init(&zdev->iommu_bitmap_lock);
	spin_lock_init(&zdev->dma_table_lock);

	zdev->dma_table = dma_alloc_cpu_table();
	if (!zdev->dma_table) {
		rc = -ENOMEM;
		goto out;
	}

	/*
	 * Restrict the iommu bitmap size to the minimum of the following:
	 * - main memory size
	 * - 3-level pagetable address limit minus start_dma offset
	 * - DMA address range allowed by the hardware (clp query pci fn)
	 *
	 * Also set zdev->end_dma to the actual end address of the usable
	 * range, instead of the theoretical maximum as reported by hardware.
	 */
	zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
	zdev->iommu_size = min3((u64) high_memory,
				ZPCI_TABLE_SIZE_RT - zdev->start_dma,
				zdev->end_dma - zdev->start_dma + 1);
	zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
	zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
	zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
	if (!zdev->iommu_bitmap) {
		rc = -ENOMEM;
		goto free_dma_table;
	}
	if (!s390_iommu_strict) {
		zdev->lazy_bitmap = vzalloc(zdev->iommu_pages / 8);
		if (!zdev->lazy_bitmap) {
			rc = -ENOMEM;
			goto free_bitmap;
		}
	}
	rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
				(u64) zdev->dma_table);
	if (rc)
		goto free_bitmap;

	return 0;
free_bitmap:
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;
free_dma_table:
	dma_free_cpu_table(zdev->dma_table);
	zdev->dma_table = NULL;
out:
	return rc;
}

void zpci_dma_exit_device(struct zpci_dev *zdev)
{
	/*
	 * At this point, if the device is part of an IOMMU domain, this would
	 * be a strong hint towards a bug in the IOMMU API (common) code and/or
	 * simultaneous access via IOMMU and DMA API. So let's issue a warning.
	 */
	WARN_ON(zdev->s390_domain);

	if (zpci_unregister_ioat(zdev, 0))
		return;

	dma_cleanup_tables(zdev->dma_table);
	zdev->dma_table = NULL;
	vfree(zdev->iommu_bitmap);
	zdev->iommu_bitmap = NULL;
	vfree(zdev->lazy_bitmap);
	zdev->lazy_bitmap = NULL;

	zdev->next_bit = 0;
}

static int __init dma_alloc_cpu_table_caches(void)
{
	dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
					ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
					0, NULL);
	if (!dma_region_table_cache)
		return -ENOMEM;

	dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
					ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
					0, NULL);
	if (!dma_page_table_cache) {
		kmem_cache_destroy(dma_region_table_cache);
		return -ENOMEM;
	}
	return 0;
}

int __init zpci_dma_init(void)
{
	return dma_alloc_cpu_table_caches();
}

void zpci_dma_exit(void)
{
	kmem_cache_destroy(dma_page_table_cache);
	kmem_cache_destroy(dma_region_table_cache);
}

const struct dma_map_ops s390_pci_dma_ops = {
	.alloc		= s390_dma_alloc,
	.free		= s390_dma_free,
	.map_sg		= s390_dma_map_sg,
	.unmap_sg	= s390_dma_unmap_sg,
	.map_page	= s390_dma_map_pages,
	.unmap_page	= s390_dma_unmap_pages,
	.mmap		= dma_common_mmap,
	.get_sgtable	= dma_common_get_sgtable,
	.alloc_pages	= dma_common_alloc_pages,
	.free_pages	= dma_common_free_pages,
	/* dma_supported is unconditionally true without a callback */
};
EXPORT_SYMBOL_GPL(s390_pci_dma_ops);

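/*
 * Parse the "s390_iommu=" kernel parameter: "s390_iommu=strict" selects
 * strict (immediate) TLB invalidation instead of the default lazy mode.
 */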
static int __init s390_iommu_setup(char *str)
{
	if (!strcmp(str, "strict"))
		s390_iommu_strict = 1;
	return 1;
}

__setup("s390_iommu=", s390_iommu_setup);