// SPDX-License-Identifier: GPL-2.0-or-later
/*
**  IA64 System Bus Adapter (SBA) I/O MMU manager
**
**	(c) Copyright 2002-2005 Alex Williamson
**	(c) Copyright 2002-2003 Grant Grundler
**	(c) Copyright 2002-2005 Hewlett-Packard Company
**
**	Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
**	Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
**
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h>         /* hweight64() */
#include <linux/crash_dump.h>
#include <linux/iommu-helper.h>
#include <linux/dma-map-ops.h>
#include <linux/prefetch.h>
#include <linux/swiotlb.h>

#include <asm/delay.h>		/* ia64_get_itc() */
#include <asm/io.h>
#include <asm/page.h>		/* PAGE_OFFSET */
#include <asm/dma.h>

#include <asm/acpi-ext.h>

#define PFX "IOC: "

/*
** Enable timing of searches of the pdir resource map.  Output in /proc.
** Disabled by default to optimize performance.
*/
#undef PDIR_SEARCH_TIMING

/*
** This option allows cards capable of 64bit DMA to bypass the IOMMU.  If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it.  Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now.  NOTE: ALLOW_IOV_BYPASS_SG also needs to be undef'd to
** completely restrict DMA to the IOMMU.
*/
#define ALLOW_IOV_BYPASS

/*
** This option specifically allows/disallows bypassing scatterlists with
** multiple entries.  Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU.  Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
*/
#undef ALLOW_IOV_BYPASS_SG

/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA.  Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues.  If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address.  This could severely impact
** error containment, and is therefore off by default.  The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
*/
#undef FULL_VALID_PDIR

#define ENABLE_MARK_CLEAN

/*
** The number of debug flags is a clue - this code is fragile.  NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync.  The sanity checking code isn't going to
** like that.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_BYPASS

#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif

#define SBA_INLINE	__inline__
/* #define SBA_INLINE */

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif


#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_BYPASS
#define DBG_BYPASS(x...)	printk(x)
#else
#define DBG_BYPASS(x...)
#endif

#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
        if(!(expr)) { \
                printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
                panic(#expr); \
        }
#else
#define ASSERT(expr)
#endif

/*
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT	64

#define PCI_DEVICE_ID_HP_SX2000_IOC	0x12ec

#define ZX1_IOC_ID	((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID	((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID	((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID	((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID	((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)

#define ZX1_IOC_OFFSET	0x1000	/* ACPI reports SBA, we want IOC */

#define IOC_FUNC_ID	0x000
#define IOC_FCLASS	0x008	/* function class, bist, header, rev... */
#define IOC_IBASE	0x300	/* IO TLB */
#define IOC_IMASK	0x308
#define IOC_PCOM	0x310
#define IOC_TCNFG	0x318
#define IOC_PDIR_BASE	0x320

#define IOC_ROPE0_CFG	0x500
#define   IOC_ROPE_AO	  0x10	/* Allow "Relaxed Ordering" */


/* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL
/*
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
**
** Some IOCs (sx1000) can run at the above page sizes, but are
** really only supported using the IOC at a 4k page size.
**
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/
static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;
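
/*
** Illustrative values only (these are set when the IOC is initialized,
** later in this file): with a 4k IOC page, iovp_size = 4096,
** iovp_shift = 12 and iovp_mask = ~0xfffUL, so an IOVA of 0x12345
** splits into page base 0x12000 (iova & iovp_mask) and offset 0x345
** (iova & ~iovp_mask).
*/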

struct ioc {
	void __iomem	*ioc_hpa;	/* I/O MMU base address */
	char		*res_map;	/* resource map, bit == pdir entry */
	u64		*pdir_base;	/* physical base address */
	unsigned long	ibase;		/* pdir IOV Space base */
	unsigned long	imask;		/* pdir IOV Space mask */

	unsigned long	*res_hint;	/* next avail IOVP - circular search */
	unsigned long	dma_mask;
	spinlock_t	res_lock;	/* protects the resource bitmap, but must be held when */
					/* clearing pdir to prevent races with allocations. */
	unsigned int	res_bitshift;	/* from the RIGHT! */
	unsigned int	res_size;	/* size of resource map in bytes */
#ifdef CONFIG_NUMA
	unsigned int	node;		/* node where this IOC lives */
#endif
#if DELAYED_RESOURCE_CNT > 0
	spinlock_t	saved_lock;	/* may want to try to get this on a separate cacheline */
					/* than res_lock for bigger systems. */
	int		saved_cnt;
	struct sba_dma_pair {
		dma_addr_t	iova;
		size_t		size;
	} saved[DELAYED_RESOURCE_CNT];
#endif

#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE	0x100
	unsigned long avg_search[SBA_SEARCH_SAMPLE];
	unsigned long avg_idx;	/* current index into avg_search */
#endif

	/* Stuff we don't need in performance path */
	struct ioc	*next;		/* list of IOC's in system */
	acpi_handle	handle;		/* for multiple IOC's */
	const char 	*name;
	unsigned int	func_id;
	unsigned int	rev;		/* HW revision of chip */
	u32		iov_size;
	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
	struct pci_dev	*sac_only_dev;
};

static struct ioc *ioc_list, *ioc_found;
static int reserve_sba_gart = 1;

static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);

#define sba_sg_address(sg)	sg_virt((sg))

#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
#endif

#define GET_IOC(dev)	((dev_is_pci(dev))						\
			 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)

/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of a software/tuning constraint
** than a HW one. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE  (BITS_PER_LONG*iovp_size)
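
/*
** e.g. with BITS_PER_LONG == 64 and a 4k iovp_size this works out to
** 256k per chunk (illustrative numbers, assuming a 4k IOC page).
*/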

#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))

/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (ie follow writes which must reach HW with a read)
**
*/
#define READ_REG(addr)       __raw_readq(addr)
#define WRITE_REG(val, addr) __raw_writeq(val, addr)
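
/*
** A minimal sketch of the posted-write rule above (hypothetical
** sequence, not a real call site in this file):
**
**	WRITE_REG(val, ioc->ioc_hpa + IOC_PCOM);  // may linger in a queue
**	READ_REG(ioc->ioc_hpa + IOC_PCOM);        // forces it out to HW
**
** The unmap path below relies on exactly this read-back to flush
** queued PCOM purges.
*/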

#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void
sba_dump_tlb(char *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
	DBG_INIT("IOC_IBASE    : %016lx\n", READ_REG(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : %016lx\n", READ_REG(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : %016lx\n", READ_REG(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#endif


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &ioc->pdir_base[pide  & ~(BITS_PER_LONG - 1)];
	unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >> 3) & -sizeof(unsigned long)];
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		 msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
		       (rcnt == (pide & (BITS_PER_LONG - 1)))
		       ? "    -->" : "       ",
		       rcnt, ptr, (unsigned long long) *ptr );
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}


/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state are consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
	u64 *rptr = (u64 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u64 rval;
		int rcnt; /* number of bits we might check */

		rval = *rptr;
		rcnt = 64;

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32)((*pptr >> (63)) & 0x1));
			if ((rval & 0x1) ^ pde)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval >>= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}


/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
		       startsg->dma_address, startsg->dma_length,
		       sba_sg_address(startsg));
		startsg = sg_next(startsg);
	}
}

static void
sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	struct scatterlist *the_sg = startsg;
	int the_nents = nents;

	while (the_nents-- > 0) {
		if (sba_sg_address(the_sg) == 0x0UL)
			sba_dump_sg(NULL, startsg, nents);
		the_sg = sg_next(the_sg);
	}
}

#endif /* ASSERT_PDIR_SANITY */




/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))

#define PDIR_ENTRY_SIZE	sizeof(u64)

#define PDIR_INDEX(iovp)   ((iovp)>>iovp_shift)

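/*
** Worked example with made-up numbers: say ioc->ibase = 0x40000000 and
** iovp_shift = 12.  Pdir entry 3 plus a 0x123 byte offset maps to
** SBA_IOVA() = 0x40000000 | (3 << 12) | 0x123 = 0x40003123.  The unmap
** path masks the offset off, SBA_IOVP() strips ibase back to 0x3000,
** and PDIR_INDEX(0x3000) recovers entry 3.
*/
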
#define RESMAP_MASK(n)    ~(~0UL << (n))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)


/**
 * For most cases the normal get_order is sufficient, however it limits us
 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
 * It only incurs about 1 clock cycle to use this one with the static variable
 * and makes the code more intuitive.
 */
static SBA_INLINE int
get_iovp_order (unsigned long size)
{
	long double d = size - 1;
	long order;

	order = ia64_getf_exp(d);
	order = order - iovp_shift - 0xffff + 1;
	if (order < 0)
		order = 0;
	return order;
}
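
/*
** Worked example of the code above (assuming getf.exp's 0xffff bias):
** for size = 8192 with iovp_shift = 12, d = 8191.0 has binary exponent
** 12, ia64_getf_exp() returns it biased by 0xffff, and the arithmetic
** yields order 1, i.e. two 4k IOVP pages.
*/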

static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}
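
/*
** e.g. res_ptr sitting two longwords (16 bytes) into res_map with
** bitshiftcnt = 5 yields pide = (16 << 3) + 5 = 133 (illustrative,
** 64-bit longs assumed).
*/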

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device being mapped, for DMA boundary constraints
 * @bits_wanted: number of entries we need.
 * @use_hint: use res_hint to indicate where to start looking
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted, int use_hint)
{
	unsigned long *res_ptr;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long flags, pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
	ASSERT(res_ptr < res_end);

	boundary_size = dma_get_seg_boundary_nr_pages(dev, iovp_shift);

	BUG_ON(ioc->ibase & ~iovp_mask);
	shift = ioc->ibase >> iovp_shift;

	spin_lock_irqsave(&ioc->res_lock, flags);

	/* Allow caller to force a search through the entire resource space */
	if (likely(use_hint)) {
		res_ptr = ioc->res_hint;
	} else {
		res_ptr = (ulong *)ioc->res_map;
		ioc->res_bitshift = 0;
	}

	/*
	 * N.B.  REO/Grande defect AR2305 can cause TLB fetch timeouts
	 * if a TLB entry is purged while in use.  sba_mark_invalid()
	 * purges IOTLB entries in power-of-two sizes, so we also
	 * allocate IOVA space in power-of-two sizes.
	 */
	bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);
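	/*
	** e.g. a request for 5 pages is rounded up here to 8 bits and then
	** searched on an 8-bit alignment below (illustrative numbers).
	*/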

	if (likely(bits_wanted == 1)) {
		unsigned int bitshiftcnt;
		for(; res_ptr < res_end ; res_ptr++) {
			if (likely(*res_ptr != ~0UL)) {
				bitshiftcnt = ffz(*res_ptr);
				*res_ptr |= (1UL << bitshiftcnt);
				pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
				ioc->res_bitshift = bitshiftcnt + bits_wanted;
				goto found_it;
			}
		}
		goto not_found;

	}

	if (likely(bits_wanted <= BITS_PER_LONG/2)) {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
		unsigned long mask, base_mask;

		base_mask = RESMAP_MASK(bits_wanted);
		mask = base_mask << bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		for(; res_ptr < res_end ; res_ptr++)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			ASSERT(0 != mask);
			for (; mask ; mask <<= o, bitshiftcnt += o) {
				tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
				ret = iommu_is_span_boundary(tpide, bits_wanted,
							     shift,
							     boundary_size);
				if ((0 == ((*res_ptr) & mask)) && !ret) {
					*res_ptr |= mask;     /* mark resources busy! */
					pide = tpide;
					ioc->res_bitshift = bitshiftcnt + bits_wanted;
					goto found_it;
				}
			}

			bitshiftcnt = 0;
			mask = base_mask;

		}

	} else {
		int qwords, bits, i;
		unsigned long *end;

		qwords = bits_wanted >> 6; /* /64 */
		bits = bits_wanted - (qwords * BITS_PER_LONG);

		end = res_end - qwords;

		for (; res_ptr < end; res_ptr++) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift, boundary_size);
			if (ret)
				goto next_ptr;
			for (i = 0 ; i < qwords ; i++) {
				if (res_ptr[i] != 0)
					goto next_ptr;
			}
			if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
				continue;

			/* Found it, mark it */
			for (i = 0 ; i < qwords ; i++)
				res_ptr[i] = ~0UL;
			res_ptr[i] |= RESMAP_MASK(bits);

			pide = tpide;
			res_ptr += qwords;
			ioc->res_bitshift = bits;
			goto found_it;
next_ptr:
			;
		}
	}

not_found:
	prefetch(ioc->res_map);
	ioc->res_hint = (unsigned long *) ioc->res_map;
	ioc->res_bitshift = 0;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);

found_it:
	ioc->res_hint = res_ptr;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);
}


/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device being mapped, for DMA boundary constraints
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and mark them in the
 * resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
	unsigned long itc_start;
#endif
	unsigned long pide;

	ASSERT(pages_needed);
	ASSERT(0 == (size & ~iovp_mask));

#ifdef PDIR_SEARCH_TIMING
	itc_start = ia64_get_itc();
#endif
	/*
	** "seek and ye shall find"...praying never hurts either...
	*/
	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
	if (unlikely(pide >= (ioc->res_size << 3))) {
		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
		if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
			unsigned long flags;

			/*
			** With delayed resource freeing, we can give this one more shot.  We're
			** getting close to being in trouble here, so do what we can to make this
			** one count.
			*/
			spin_lock_irqsave(&ioc->saved_lock, flags);
			if (ioc->saved_cnt > 0) {
				struct sba_dma_pair *d;
				int cnt = ioc->saved_cnt;

				d = &(ioc->saved[ioc->saved_cnt - 1]);

				spin_lock(&ioc->res_lock);
				while (cnt--) {
					sba_mark_invalid(ioc, d->iova, d->size);
					sba_free_range(ioc, d->iova, d->size);
					d--;
				}
				ioc->saved_cnt = 0;
				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
				spin_unlock(&ioc->res_lock);
			}
			spin_unlock_irqrestore(&ioc->saved_lock, flags);

			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
			if (unlikely(pide >= (ioc->res_size << 3))) {
				printk(KERN_WARNING "%s: I/O MMU @ %p is "
				       "out of mapping resources, %u %u %lx\n",
				       __func__, ioc->ioc_hpa, ioc->res_size,
				       pages_needed, dma_get_seg_boundary(dev));
				return -1;
			}
#else
			printk(KERN_WARNING "%s: I/O MMU @ %p is "
			       "out of mapping resources, %u %u %lx\n",
			       __func__, ioc->ioc_hpa, ioc->res_size,
			       pages_needed, dma_get_seg_boundary(dev));
			return -1;
#endif
		}
	}

#ifdef PDIR_SEARCH_TIMING
	ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
#endif

	prefetchw(&(ioc->pdir_base[pide]));

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

	return (pide);
}


/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
	int bits_not_wanted = size >> iovp_shift;
	unsigned long m;

	/* Round up to power-of-two size: see AR2305 note above */
	bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
	for (; bits_not_wanted > 0 ; res_ptr++) {

		if (unlikely(bits_not_wanted > BITS_PER_LONG)) {

			/* these mappings start 64bit aligned */
			*res_ptr = 0UL;
			bits_not_wanted -= BITS_PER_LONG;
			pide += BITS_PER_LONG;

		} else {

			/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
			m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
			bits_not_wanted = 0;

			DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
			        bits_not_wanted, m, pide, res_ptr, *res_ptr);

			ASSERT(m != 0);
			ASSERT(bits_not_wanted);
			ASSERT((*res_ptr & m) == m); /* verify same bits are set */
			*res_ptr &= ~m;
		}
	}
}
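
/*
** Free-path illustration with made-up values: releasing 4 bits at
** pide 4 builds m = RESMAP_MASK(4) << (4 & 63) = 0xf << 4 = 0xf0 and
** clears exactly those bits in the resource word.
*/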


/**************************************************************
*
*   "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *
 *  63                    40                                 11    7        0
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[39:12]            | U  |   FF   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 *  V  == Valid Bit
 *  U  == Unused
 * PPN == Physical Page Number
 *
 * The physical address fields are filled with the results of virt_to_phys()
 * on the vba.
 */

#if 1
#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL)	\
						      | 0x8000000000000000ULL)
#else
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
{
	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
}
#endif
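
/*
** Worked example of the layout above (hypothetical region-7 address):
** for vba = 0xe000000012345678, the mask clears the region bits [63:61]
** and the page offset [11:0], and OR'ing in bit 63 yields the pdir
** entry 0x8000000012345000 (valid bit set, PPN taken from the
** identity-mapped kernel address, i.e. virt_to_phys(vba)).
*/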

#ifdef ENABLE_MARK_CLEAN
/**
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page((void *)pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}
#endif

/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Mark the IO PDIR entry(ies) as invalid and purge the corresponding
 * IO TLB entry. The PCOM (Purge Command Register) is used to purge
 * stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc,iova);

	int off = PDIR_INDEX(iovp);

	/* Must be non-zero and rounded up */
	ASSERT(byte_cnt > 0);
	ASSERT(0 == (byte_cnt & ~iovp_mask));

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (!(ioc->pdir_base[off] >> 60)) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt <= iovp_size)
	{
		ASSERT(off < ioc->pdir_size);

		iovp |= iovp_shift;     /* set "size" field for PCOM */

#ifndef FULL_VALID_PDIR
		/*
		** clear I/O PDIR entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
		ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
		/*
		** If we want to maintain the PDIR as valid, put in
		** the spill page so devices prefetching won't
		** cause a hard fail.
		*/
		ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
	} else {
		u32 t = get_iovp_order(byte_cnt) + iovp_shift;

		iovp |= t;
		ASSERT(t <= 31);   /* 2GB! Max value of "size" field */

		do {
			/* verify this pdir entry is enabled */
			ASSERT(ioc->pdir_base[off]  >> 63);
#ifndef FULL_VALID_PDIR
			/* clear I/O Pdir entry "valid" bit first */
			ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
			ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
			off++;
			byte_cnt -= iovp_size;
		} while (byte_cnt > 0);
	}

	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
}
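
/*
** PCOM encoding illustration (made-up numbers): purging 16k at
** iovp 0x4000 sets the low-bits "size" field to log2(16384) = 14, so
** the register write above becomes
** WRITE_REG(0x4000 | 14 | ioc->ibase, ioc->ioc_hpa + IOC_PCOM).
*/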

/**
 * sba_map_page - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @page: page to map
 * @poff: offset into page
 * @size: number of bytes to map
 * @dir: dma direction
 * @attrs: optional dma attributes
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
			       unsigned long poff, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct ioc *ioc;
	void *addr = page_address(page) + poff;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS
	unsigned long pci_addr = virt_to_phys(addr);
#endif

#ifdef ALLOW_IOV_BYPASS
	ASSERT(to_pci_dev(dev)->dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
		/*
		** Device is capable of DMA'ing to the buffer...
		** just return the PCI address of ptr
		*/
		DBG_BYPASS("sba_map_page() bypass mask/addr: "
			   "0x%lx/0x%lx\n",
		           to_pci_dev(dev)->dma_mask, pci_addr);
		return pci_addr;
	}
#endif
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	prefetch(ioc->res_hint);

	ASSERT(size > 0);
	ASSERT(size <= DMA_CHUNK_SIZE);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~iovp_mask;

	/* round up to nearest iovp_size */
	size = (size + offset + ~iovp_mask) & iovp_mask;

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc,"Check before sba_map_page()"))
		panic("Sanity check failed");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	pide = sba_alloc_range(ioc, dev, size);
	if (pide < 0)
		return DMA_MAPPING_ERROR;

	iovp = (dma_addr_t) pide << iovp_shift;

	DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
		sba_io_pdir_entry(pdir_start, (unsigned long) addr);

		DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);

		addr += iovp_size;
		size -= iovp_size;
		pdir_start++;
	}
	/* force pdir update */
	wmb();

	/* form complete address */
#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_map_page()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
	return SBA_IOVA(ioc, iovp, offset);
}
997*4882a593Smuzhiyun 
998*4882a593Smuzhiyun #ifdef ENABLE_MARK_CLEAN
999*4882a593Smuzhiyun static SBA_INLINE void
sba_mark_clean(struct ioc * ioc,dma_addr_t iova,size_t size)1000*4882a593Smuzhiyun sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
1001*4882a593Smuzhiyun {
1002*4882a593Smuzhiyun 	u32	iovp = (u32) SBA_IOVP(ioc,iova);
1003*4882a593Smuzhiyun 	int	off = PDIR_INDEX(iovp);
1004*4882a593Smuzhiyun 	void	*addr;
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun 	if (size <= iovp_size) {
1007*4882a593Smuzhiyun 		addr = phys_to_virt(ioc->pdir_base[off] &
1008*4882a593Smuzhiyun 		                    ~0xE000000000000FFFULL);
1009*4882a593Smuzhiyun 		mark_clean(addr, size);
1010*4882a593Smuzhiyun 	} else {
1011*4882a593Smuzhiyun 		do {
1012*4882a593Smuzhiyun 			addr = phys_to_virt(ioc->pdir_base[off] &
1013*4882a593Smuzhiyun 			                    ~0xE000000000000FFFULL);
1014*4882a593Smuzhiyun 			mark_clean(addr, min(size, iovp_size));
1015*4882a593Smuzhiyun 			off++;
1016*4882a593Smuzhiyun 			size -= iovp_size;
1017*4882a593Smuzhiyun 		} while (size > 0);
1018*4882a593Smuzhiyun 	}
1019*4882a593Smuzhiyun }
1020*4882a593Smuzhiyun #endif
1021*4882a593Smuzhiyun 
1022*4882a593Smuzhiyun /**
1023*4882a593Smuzhiyun  * sba_unmap_page - unmap one IOVA and free resources
1024*4882a593Smuzhiyun  * @dev: instance of PCI owned by the driver that's asking.
1025*4882a593Smuzhiyun  * @iova:  IOVA of driver buffer previously mapped.
1026*4882a593Smuzhiyun  * @size:  number of bytes mapped in driver buffer.
1027*4882a593Smuzhiyun  * @dir:  R/W or both.
1028*4882a593Smuzhiyun  * @attrs: optional dma attributes
1029*4882a593Smuzhiyun  *
1030*4882a593Smuzhiyun  * See Documentation/core-api/dma-api-howto.rst
1031*4882a593Smuzhiyun  */
sba_unmap_page(struct device * dev,dma_addr_t iova,size_t size,enum dma_data_direction dir,unsigned long attrs)1032*4882a593Smuzhiyun static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
1033*4882a593Smuzhiyun 			   enum dma_data_direction dir, unsigned long attrs)
1034*4882a593Smuzhiyun {
1035*4882a593Smuzhiyun 	struct ioc *ioc;
1036*4882a593Smuzhiyun #if DELAYED_RESOURCE_CNT > 0
1037*4882a593Smuzhiyun 	struct sba_dma_pair *d;
1038*4882a593Smuzhiyun #endif
1039*4882a593Smuzhiyun 	unsigned long flags;
1040*4882a593Smuzhiyun 	dma_addr_t offset;
1041*4882a593Smuzhiyun 
1042*4882a593Smuzhiyun 	ioc = GET_IOC(dev);
1043*4882a593Smuzhiyun 	ASSERT(ioc);
1044*4882a593Smuzhiyun 
1045*4882a593Smuzhiyun #ifdef ALLOW_IOV_BYPASS
1046*4882a593Smuzhiyun 	if (likely((iova & ioc->imask) != ioc->ibase)) {
1047*4882a593Smuzhiyun 		/*
1048*4882a593Smuzhiyun 		** Address does not fall w/in IOVA, must be bypassing
1049*4882a593Smuzhiyun 		*/
1050*4882a593Smuzhiyun 		DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n",
1051*4882a593Smuzhiyun 			   iova);
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun #ifdef ENABLE_MARK_CLEAN
1054*4882a593Smuzhiyun 		if (dir == DMA_FROM_DEVICE) {
1055*4882a593Smuzhiyun 			mark_clean(phys_to_virt(iova), size);
1056*4882a593Smuzhiyun 		}
1057*4882a593Smuzhiyun #endif
1058*4882a593Smuzhiyun 		return;
1059*4882a593Smuzhiyun 	}
1060*4882a593Smuzhiyun #endif
1061*4882a593Smuzhiyun 	offset = iova & ~iovp_mask;
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun 	iova ^= offset;        /* clear offset bits */
1066*4882a593Smuzhiyun 	size += offset;
1067*4882a593Smuzhiyun 	size = ROUNDUP(size, iovp_size);
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun #ifdef ENABLE_MARK_CLEAN
1070*4882a593Smuzhiyun 	if (dir == DMA_FROM_DEVICE)
1071*4882a593Smuzhiyun 		sba_mark_clean(ioc, iova, size);
1072*4882a593Smuzhiyun #endif
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun #if DELAYED_RESOURCE_CNT > 0
1075*4882a593Smuzhiyun 	spin_lock_irqsave(&ioc->saved_lock, flags);
1076*4882a593Smuzhiyun 	d = &(ioc->saved[ioc->saved_cnt]);
1077*4882a593Smuzhiyun 	d->iova = iova;
1078*4882a593Smuzhiyun 	d->size = size;
1079*4882a593Smuzhiyun 	if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
1080*4882a593Smuzhiyun 		int cnt = ioc->saved_cnt;
1081*4882a593Smuzhiyun 		spin_lock(&ioc->res_lock);
1082*4882a593Smuzhiyun 		while (cnt--) {
1083*4882a593Smuzhiyun 			sba_mark_invalid(ioc, d->iova, d->size);
1084*4882a593Smuzhiyun 			sba_free_range(ioc, d->iova, d->size);
1085*4882a593Smuzhiyun 			d--;
1086*4882a593Smuzhiyun 		}
1087*4882a593Smuzhiyun 		ioc->saved_cnt = 0;
1088*4882a593Smuzhiyun 		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
1089*4882a593Smuzhiyun 		spin_unlock(&ioc->res_lock);
1090*4882a593Smuzhiyun 	}
1091*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ioc->saved_lock, flags);
1092*4882a593Smuzhiyun #else /* DELAYED_RESOURCE_CNT == 0 */
1093*4882a593Smuzhiyun 	spin_lock_irqsave(&ioc->res_lock, flags);
1094*4882a593Smuzhiyun 	sba_mark_invalid(ioc, iova, size);
1095*4882a593Smuzhiyun 	sba_free_range(ioc, iova, size);
1096*4882a593Smuzhiyun 	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
1097*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ioc->res_lock, flags);
1098*4882a593Smuzhiyun #endif /* DELAYED_RESOURCE_CNT == 0 */
1099*4882a593Smuzhiyun }
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun /**
1102*4882a593Smuzhiyun  * sba_alloc_coherent - allocate/map shared mem for DMA
1103*4882a593Smuzhiyun  * @dev: instance of PCI owned by the driver that's asking.
1104*4882a593Smuzhiyun  * @size:  number of bytes mapped in driver buffer.
1105*4882a593Smuzhiyun  * @dma_handle:  IOVA of new buffer.
1106*4882a593Smuzhiyun  *
1107*4882a593Smuzhiyun  * See Documentation/core-api/dma-api-howto.rst
1108*4882a593Smuzhiyun  */
1109*4882a593Smuzhiyun static void *
sba_alloc_coherent(struct device * dev,size_t size,dma_addr_t * dma_handle,gfp_t flags,unsigned long attrs)1110*4882a593Smuzhiyun sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
1111*4882a593Smuzhiyun 		   gfp_t flags, unsigned long attrs)
1112*4882a593Smuzhiyun {
1113*4882a593Smuzhiyun 	struct page *page;
1114*4882a593Smuzhiyun 	struct ioc *ioc;
1115*4882a593Smuzhiyun 	int node = -1;
1116*4882a593Smuzhiyun 	void *addr;
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun 	ioc = GET_IOC(dev);
1119*4882a593Smuzhiyun 	ASSERT(ioc);
1120*4882a593Smuzhiyun #ifdef CONFIG_NUMA
1121*4882a593Smuzhiyun 	node = ioc->node;
1122*4882a593Smuzhiyun #endif
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 	page = alloc_pages_node(node, flags, get_order(size));
1125*4882a593Smuzhiyun 	if (unlikely(!page))
1126*4882a593Smuzhiyun 		return NULL;
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 	addr = page_address(page);
1129*4882a593Smuzhiyun 	memset(addr, 0, size);
1130*4882a593Smuzhiyun 	*dma_handle = page_to_phys(page);
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun #ifdef ALLOW_IOV_BYPASS
1133*4882a593Smuzhiyun 	ASSERT(dev->coherent_dma_mask);
1134*4882a593Smuzhiyun 	/*
1135*4882a593Smuzhiyun  	** Check if the PCI device can DMA to ptr... if so, just return ptr
1136*4882a593Smuzhiyun  	*/
1137*4882a593Smuzhiyun 	if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
1138*4882a593Smuzhiyun 		DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
1139*4882a593Smuzhiyun 		           dev->coherent_dma_mask, *dma_handle);
1140*4882a593Smuzhiyun 
1141*4882a593Smuzhiyun 		return addr;
1142*4882a593Smuzhiyun 	}
1143*4882a593Smuzhiyun #endif
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	/*
1146*4882a593Smuzhiyun 	 * If device can't bypass or bypass is disabled, pass the 32bit fake
1147*4882a593Smuzhiyun 	 * device to map single to get an iova mapping.
1148*4882a593Smuzhiyun 	 */
1149*4882a593Smuzhiyun 	*dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size,
1150*4882a593Smuzhiyun 			DMA_BIDIRECTIONAL, 0);
1151*4882a593Smuzhiyun 	if (dma_mapping_error(dev, *dma_handle))
1152*4882a593Smuzhiyun 		return NULL;
1153*4882a593Smuzhiyun 	return addr;
1154*4882a593Smuzhiyun }
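/*
** Editor's sketch: the bypass test above, on concrete values.  A physical
** address may be handed straight to the device iff it fits entirely within
** the device's coherent DMA mask.  Standalone toy (assumes 64-bit long),
** hypothetical names.
*/
#include <assert.h>

static void toy_bypass_check(void)
{
	unsigned long mask32 = 0xFFFFFFFFUL;	/* 32-bit coherent mask */

	assert((0x00000000E0000000UL & ~mask32) == 0);	/* below 4GB: bypass */
	assert((0x0000000100000000UL & ~mask32) != 0);	/* above 4GB: must map */
}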
1155*4882a593Smuzhiyun 
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun /**
1158*4882a593Smuzhiyun  * sba_free_coherent - free/unmap shared mem for DMA
1159*4882a593Smuzhiyun  * @dev: instance of PCI owned by the driver that's asking.
1160*4882a593Smuzhiyun  * @size:  number of bytes mapped in driver buffer.
1161*4882a593Smuzhiyun  * @vaddr:  virtual address IOVA of "consistent" buffer.
1162*4882a593Smuzhiyun  * @dma_handle:  IO virtual address of "consistent" buffer.
 * @attrs: optional DMA attributes.
1163*4882a593Smuzhiyun  *
1164*4882a593Smuzhiyun  * See Documentation/core-api/dma-api-howto.rst
1165*4882a593Smuzhiyun  */
1166*4882a593Smuzhiyun static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
1167*4882a593Smuzhiyun 			      dma_addr_t dma_handle, unsigned long attrs)
1168*4882a593Smuzhiyun {
1169*4882a593Smuzhiyun 	sba_unmap_page(dev, dma_handle, size, 0, 0);
1170*4882a593Smuzhiyun 	free_pages((unsigned long) vaddr, get_order(size));
1171*4882a593Smuzhiyun }
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun /*
1175*4882a593Smuzhiyun ** Since 0 is a valid pdir_base index value, can't use that
1176*4882a593Smuzhiyun ** to determine if a value is valid or not. Use a flag to indicate
1177*4882a593Smuzhiyun ** the SG list entry contains a valid pdir index.
1178*4882a593Smuzhiyun */
1179*4882a593Smuzhiyun #define PIDE_FLAG 0x1UL
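/*
** Editor's sketch: how PIDE_FLAG keeps pdir index 0 usable.  A tagged index
** is never zero, so "no index" can stay encoded as 0.  Standalone toy,
** hypothetical names.
*/
#include <assert.h>

static void toy_pide_tagging(void)
{
	unsigned long tagged = 0 | 0x1UL;	/* tag pdir index 0 */

	assert(tagged != 0);			/* distinguishable from "unset" */
	assert((tagged & ~0x1UL) == 0);		/* untagging recovers index 0 */
}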
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun #ifdef DEBUG_LARGE_SG_ENTRIES
1182*4882a593Smuzhiyun int dump_run_sg = 0;
1183*4882a593Smuzhiyun #endif
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun /**
1187*4882a593Smuzhiyun  * sba_fill_pdir - write allocated SG entries into IO PDIR
1188*4882a593Smuzhiyun  * @ioc: IO MMU structure which owns the pdir we are interested in.
1189*4882a593Smuzhiyun  * @startsg:  list of IOVA/size pairs
1190*4882a593Smuzhiyun  * @nents: number of entries in startsg list
1191*4882a593Smuzhiyun  *
1192*4882a593Smuzhiyun  * Take preprocessed SG list and write corresponding entries
1193*4882a593Smuzhiyun  * in the IO PDIR.
1194*4882a593Smuzhiyun  */
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun static SBA_INLINE int
1197*4882a593Smuzhiyun sba_fill_pdir(
1198*4882a593Smuzhiyun 	struct ioc *ioc,
1199*4882a593Smuzhiyun 	struct scatterlist *startsg,
1200*4882a593Smuzhiyun 	int nents)
1201*4882a593Smuzhiyun {
1202*4882a593Smuzhiyun 	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
1203*4882a593Smuzhiyun 	int n_mappings = 0;
1204*4882a593Smuzhiyun 	u64 *pdirp = NULL;
1205*4882a593Smuzhiyun 	unsigned long dma_offset = 0;
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	while (nents-- > 0) {
1208*4882a593Smuzhiyun 		int     cnt = startsg->dma_length;
1209*4882a593Smuzhiyun 		startsg->dma_length = 0;
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun #ifdef DEBUG_LARGE_SG_ENTRIES
1212*4882a593Smuzhiyun 		if (dump_run_sg)
1213*4882a593Smuzhiyun 			printk(" %2d : %08lx/%05x %p\n",
1214*4882a593Smuzhiyun 				nents, startsg->dma_address, cnt,
1215*4882a593Smuzhiyun 				sba_sg_address(startsg));
1216*4882a593Smuzhiyun #else
1217*4882a593Smuzhiyun 		DBG_RUN_SG(" %d : %08lx/%05x %p\n",
1218*4882a593Smuzhiyun 				nents, startsg->dma_address, cnt,
1219*4882a593Smuzhiyun 				sba_sg_address(startsg));
1220*4882a593Smuzhiyun #endif
1221*4882a593Smuzhiyun 		/*
1222*4882a593Smuzhiyun 		** Look for the start of a new DMA stream
1223*4882a593Smuzhiyun 		*/
1224*4882a593Smuzhiyun 		if (startsg->dma_address & PIDE_FLAG) {
1225*4882a593Smuzhiyun 			u32 pide = startsg->dma_address & ~PIDE_FLAG;
1226*4882a593Smuzhiyun 			dma_offset = (unsigned long) pide & ~iovp_mask;
1227*4882a593Smuzhiyun 			startsg->dma_address = 0;
1228*4882a593Smuzhiyun 			if (n_mappings)
1229*4882a593Smuzhiyun 				dma_sg = sg_next(dma_sg);
1230*4882a593Smuzhiyun 			dma_sg->dma_address = pide | ioc->ibase;
1231*4882a593Smuzhiyun 			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
1232*4882a593Smuzhiyun 			n_mappings++;
1233*4882a593Smuzhiyun 		}
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 		/*
1236*4882a593Smuzhiyun 		** Look for a VCONTIG chunk
1237*4882a593Smuzhiyun 		*/
1238*4882a593Smuzhiyun 		if (cnt) {
1239*4882a593Smuzhiyun 			unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1240*4882a593Smuzhiyun 			ASSERT(pdirp);
1241*4882a593Smuzhiyun 
1242*4882a593Smuzhiyun 			/* Since multiple Vcontig blocks could make up
1243*4882a593Smuzhiyun 			** one DMA stream, *add* cnt to dma_len.
1244*4882a593Smuzhiyun 			*/
1245*4882a593Smuzhiyun 			dma_sg->dma_length += cnt;
1246*4882a593Smuzhiyun 			cnt += dma_offset;
1247*4882a593Smuzhiyun 			dma_offset = 0;	/* only want offset on first chunk */
1248*4882a593Smuzhiyun 			cnt = ROUNDUP(cnt, iovp_size);
1249*4882a593Smuzhiyun 			do {
1250*4882a593Smuzhiyun 				sba_io_pdir_entry(pdirp, vaddr);
1251*4882a593Smuzhiyun 				vaddr += iovp_size;
1252*4882a593Smuzhiyun 				cnt -= iovp_size;
1253*4882a593Smuzhiyun 				pdirp++;
1254*4882a593Smuzhiyun 			} while (cnt > 0);
1255*4882a593Smuzhiyun 		}
1256*4882a593Smuzhiyun 		startsg = sg_next(startsg);
1257*4882a593Smuzhiyun 	}
1258*4882a593Smuzhiyun 	/* force pdir update */
1259*4882a593Smuzhiyun 	wmb();
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun #ifdef DEBUG_LARGE_SG_ENTRIES
1262*4882a593Smuzhiyun 	dump_run_sg = 0;
1263*4882a593Smuzhiyun #endif
1264*4882a593Smuzhiyun 	return(n_mappings);
1265*4882a593Smuzhiyun }
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun /*
1269*4882a593Smuzhiyun ** Two address ranges are DMA contiguous *iff* "end of prev" and
1270*4882a593Smuzhiyun ** "start of next" are both on an IOV page boundary.
1271*4882a593Smuzhiyun **
1272*4882a593Smuzhiyun ** (shift left is a quick trick to mask off upper bits)
1273*4882a593Smuzhiyun */
1274*4882a593Smuzhiyun #define DMA_CONTIG(__X, __Y) \
1275*4882a593Smuzhiyun 	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
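/*
** Editor's sketch: the shift-left trick worked through, assuming 64-bit
** longs and iovp_shift == 12.  (x | y) << 52 keeps only the low 12 bits of
** each address, so the result is zero iff both are 4KB aligned -- i.e. the
** end of the previous range and the start of the next both sit on an IOV
** page boundary, and the IOMMU can splice them into one DMA stream.
** Standalone toy, hypothetical names.
*/
#include <assert.h>

static int toy_dma_contig(unsigned long end_prev, unsigned long start_next)
{
	return (((end_prev | start_next) << (64 - 12)) == 0UL);
}

static void toy_dma_contig_demo(void)
{
	assert(toy_dma_contig(0x103000, 0x5000));	/* both page aligned  */
	assert(!toy_dma_contig(0x103080, 0x5000));	/* prev ends mid-page */
}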
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun /**
1279*4882a593Smuzhiyun  * sba_coalesce_chunks - preprocess the SG list
1280*4882a593Smuzhiyun  * @ioc: IO MMU structure which owns the pdir we are interested in.
1281*4882a593Smuzhiyun  * @startsg:  list of IOVA/size pairs
1282*4882a593Smuzhiyun  * @nents: number of entries in startsg list
1283*4882a593Smuzhiyun  *
1284*4882a593Smuzhiyun  * First pass is to walk the SG list and determine where the breaks are
1285*4882a593Smuzhiyun  * in the DMA stream. Allocates PDIR entries but does not fill them.
1286*4882a593Smuzhiyun  * Returns the number of DMA chunks.
1287*4882a593Smuzhiyun  *
1288*4882a593Smuzhiyun  * Doing the fill separate from the coalescing/allocation keeps the
1289*4882a593Smuzhiyun  * code simpler. Future enhancement could make one pass through
1290*4882a593Smuzhiyun  * the sglist do both.
1291*4882a593Smuzhiyun  */
1292*4882a593Smuzhiyun static SBA_INLINE int
1293*4882a593Smuzhiyun sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
1294*4882a593Smuzhiyun 	struct scatterlist *startsg,
1295*4882a593Smuzhiyun 	int nents)
1296*4882a593Smuzhiyun {
1297*4882a593Smuzhiyun 	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
1298*4882a593Smuzhiyun 	unsigned long vcontig_len;         /* len of VCONTIG chunk */
1299*4882a593Smuzhiyun 	unsigned long vcontig_end;
1300*4882a593Smuzhiyun 	struct scatterlist *dma_sg;        /* next DMA stream head */
1301*4882a593Smuzhiyun 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
1302*4882a593Smuzhiyun 	int n_mappings = 0;
1303*4882a593Smuzhiyun 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
1304*4882a593Smuzhiyun 	int idx;
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun 	while (nents > 0) {
1307*4882a593Smuzhiyun 		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun 		/*
1310*4882a593Smuzhiyun 		** Prepare for first/next DMA stream
1311*4882a593Smuzhiyun 		*/
1312*4882a593Smuzhiyun 		dma_sg = vcontig_sg = startsg;
1313*4882a593Smuzhiyun 		dma_len = vcontig_len = vcontig_end = startsg->length;
1314*4882a593Smuzhiyun 		vcontig_end +=  vaddr;
1315*4882a593Smuzhiyun 		dma_offset = vaddr & ~iovp_mask;
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 		/* PARANOID: clear entries */
1318*4882a593Smuzhiyun 		startsg->dma_address = startsg->dma_length = 0;
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 		/*
1321*4882a593Smuzhiyun 		** This loop terminates one iteration "early" since
1322*4882a593Smuzhiyun 		** it's always looking one "ahead".
1323*4882a593Smuzhiyun 		*/
1324*4882a593Smuzhiyun 		while (--nents > 0) {
1325*4882a593Smuzhiyun 			unsigned long vaddr;	/* tmp */
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 			startsg = sg_next(startsg);
1328*4882a593Smuzhiyun 
1329*4882a593Smuzhiyun 			/* PARANOID */
1330*4882a593Smuzhiyun 			startsg->dma_address = startsg->dma_length = 0;
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun 			/* catch brokenness in SCSI layer */
1333*4882a593Smuzhiyun 			ASSERT(startsg->length <= DMA_CHUNK_SIZE);
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 			/*
1336*4882a593Smuzhiyun 			** First make sure current dma stream won't
1337*4882a593Smuzhiyun 			** exceed DMA_CHUNK_SIZE if we coalesce the
1338*4882a593Smuzhiyun 			** next entry.
1339*4882a593Smuzhiyun 			*/
1340*4882a593Smuzhiyun 			if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
1341*4882a593Smuzhiyun 			    > DMA_CHUNK_SIZE)
1342*4882a593Smuzhiyun 				break;
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun 			if (dma_len + startsg->length > max_seg_size)
1345*4882a593Smuzhiyun 				break;
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun 			/*
1348*4882a593Smuzhiyun 			** Then look for virtually contiguous blocks.
1349*4882a593Smuzhiyun 			**
1350*4882a593Smuzhiyun 			** append the next transaction?
1351*4882a593Smuzhiyun 			*/
1352*4882a593Smuzhiyun 			vaddr = (unsigned long) sba_sg_address(startsg);
1353*4882a593Smuzhiyun 			if  (vcontig_end == vaddr)
1354*4882a593Smuzhiyun 			{
1355*4882a593Smuzhiyun 				vcontig_len += startsg->length;
1356*4882a593Smuzhiyun 				vcontig_end += startsg->length;
1357*4882a593Smuzhiyun 				dma_len     += startsg->length;
1358*4882a593Smuzhiyun 				continue;
1359*4882a593Smuzhiyun 			}
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun #ifdef DEBUG_LARGE_SG_ENTRIES
1362*4882a593Smuzhiyun 			dump_run_sg = (vcontig_len > iovp_size);
1363*4882a593Smuzhiyun #endif
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 			/*
1366*4882a593Smuzhiyun 			** Not virtually contiguous.
1367*4882a593Smuzhiyun 			** Terminate prev chunk.
1368*4882a593Smuzhiyun 			** Start a new chunk.
1369*4882a593Smuzhiyun 			**
1370*4882a593Smuzhiyun 			** Once we start a new VCONTIG chunk, dma_offset
1371*4882a593Smuzhiyun 			** can't change. And we need the offset from the first
1372*4882a593Smuzhiyun 			** chunk - not the last one. Ergo successive chunks
1373*4882a593Smuzhiyun 			** must start on page boundaries and dovetail
1374*4882a593Smuzhiyun 			** with their predecessors.
1375*4882a593Smuzhiyun 			*/
1376*4882a593Smuzhiyun 			vcontig_sg->dma_length = vcontig_len;
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 			vcontig_sg = startsg;
1379*4882a593Smuzhiyun 			vcontig_len = startsg->length;
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 			/*
1382*4882a593Smuzhiyun 			** 3) do the entries end/start on page boundaries?
1383*4882a593Smuzhiyun 			**    Don't update vcontig_end until we've checked.
1384*4882a593Smuzhiyun 			*/
1385*4882a593Smuzhiyun 			if (DMA_CONTIG(vcontig_end, vaddr))
1386*4882a593Smuzhiyun 			{
1387*4882a593Smuzhiyun 				vcontig_end = vcontig_len + vaddr;
1388*4882a593Smuzhiyun 				dma_len += vcontig_len;
1389*4882a593Smuzhiyun 				continue;
1390*4882a593Smuzhiyun 			} else {
1391*4882a593Smuzhiyun 				break;
1392*4882a593Smuzhiyun 			}
1393*4882a593Smuzhiyun 		}
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 		/*
1396*4882a593Smuzhiyun 		** End of DMA Stream
1397*4882a593Smuzhiyun 		** Terminate last VCONTIG block.
1398*4882a593Smuzhiyun 		** Allocate space for DMA stream.
1399*4882a593Smuzhiyun 		*/
1400*4882a593Smuzhiyun 		vcontig_sg->dma_length = vcontig_len;
1401*4882a593Smuzhiyun 		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
1402*4882a593Smuzhiyun 		ASSERT(dma_len <= DMA_CHUNK_SIZE);
1403*4882a593Smuzhiyun 		idx = sba_alloc_range(ioc, dev, dma_len);
1404*4882a593Smuzhiyun 		if (idx < 0) {
1405*4882a593Smuzhiyun 			dma_sg->dma_length = 0;
1406*4882a593Smuzhiyun 			return -1;
1407*4882a593Smuzhiyun 		}
1408*4882a593Smuzhiyun 		dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
1409*4882a593Smuzhiyun 						   | dma_offset);
1410*4882a593Smuzhiyun 		n_mappings++;
1411*4882a593Smuzhiyun 	}
1412*4882a593Smuzhiyun 
1413*4882a593Smuzhiyun 	return n_mappings;
1414*4882a593Smuzhiyun }
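/*
** Editor's sketch (standalone, hypothetical): the two break conditions of
** the coalescing pass above, applied to a flat array instead of a
** scatterlist.  A segment joins the current DMA stream if it is virtually
** contiguous with its predecessor or if the two meet on an IOV page
** boundary (the DMA_CONTIG case); the stream also breaks once it would
** exceed the chunk-size cap.
*/
#define TOY_PAGE	0x1000UL	/* iovp_size stand-in       */
#define TOY_CHUNK_MAX	0x8000UL	/* DMA_CHUNK_SIZE stand-in  */

struct toy_seg { unsigned long vaddr, len; };

static int toy_count_streams(const struct toy_seg *s, int n)
{
	unsigned long end = s[0].vaddr + s[0].len, dma_len = s[0].len;
	int i, streams = 1;

	for (i = 1; i < n; i++) {
		int joinable = (end == s[i].vaddr) ||	/* virtually contiguous */
			(((end | s[i].vaddr) & (TOY_PAGE - 1)) == 0); /* dovetail */

		if (joinable && dma_len + s[i].len <= TOY_CHUNK_MAX) {
			dma_len += s[i].len;	/* extend current stream */
		} else {
			streams++;		/* break: start a new stream */
			dma_len = s[i].len;
		}
		end = s[i].vaddr + s[i].len;
	}
	return streams;
}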
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
1417*4882a593Smuzhiyun 			       int nents, enum dma_data_direction dir,
1418*4882a593Smuzhiyun 			       unsigned long attrs);
1419*4882a593Smuzhiyun /**
1420*4882a593Smuzhiyun  * sba_map_sg_attrs - map Scatter/Gather list
1421*4882a593Smuzhiyun  * @dev: instance of PCI owned by the driver that's asking.
1422*4882a593Smuzhiyun  * @sglist:  array of buffer/length pairs
1423*4882a593Smuzhiyun  * @nents:  number of entries in list
1424*4882a593Smuzhiyun  * @dir:  R/W or both.
1425*4882a593Smuzhiyun  * @attrs: optional dma attributes
1426*4882a593Smuzhiyun  *
1427*4882a593Smuzhiyun  * See Documentation/core-api/dma-api-howto.rst
1428*4882a593Smuzhiyun  */
1429*4882a593Smuzhiyun static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
1430*4882a593Smuzhiyun 			    int nents, enum dma_data_direction dir,
1431*4882a593Smuzhiyun 			    unsigned long attrs)
1432*4882a593Smuzhiyun {
1433*4882a593Smuzhiyun 	struct ioc *ioc;
1434*4882a593Smuzhiyun 	int coalesced, filled = 0;
1435*4882a593Smuzhiyun #ifdef ASSERT_PDIR_SANITY
1436*4882a593Smuzhiyun 	unsigned long flags;
1437*4882a593Smuzhiyun #endif
1438*4882a593Smuzhiyun #ifdef ALLOW_IOV_BYPASS_SG
1439*4882a593Smuzhiyun 	struct scatterlist *sg;
1440*4882a593Smuzhiyun #endif
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
1443*4882a593Smuzhiyun 	ioc = GET_IOC(dev);
1444*4882a593Smuzhiyun 	ASSERT(ioc);
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun #ifdef ALLOW_IOV_BYPASS_SG
1447*4882a593Smuzhiyun 	ASSERT(to_pci_dev(dev)->dma_mask);
1448*4882a593Smuzhiyun 	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
1449*4882a593Smuzhiyun 		for_each_sg(sglist, sg, nents, filled) {
1450*4882a593Smuzhiyun 			sg->dma_length = sg->length;
1451*4882a593Smuzhiyun 			sg->dma_address = virt_to_phys(sba_sg_address(sg));
1452*4882a593Smuzhiyun 		}
1453*4882a593Smuzhiyun 		return filled;
1454*4882a593Smuzhiyun 	}
1455*4882a593Smuzhiyun #endif
1456*4882a593Smuzhiyun 	/* Fast path single entry scatterlists. */
1457*4882a593Smuzhiyun 	if (nents == 1) {
1458*4882a593Smuzhiyun 		sglist->dma_length = sglist->length;
1459*4882a593Smuzhiyun 		sglist->dma_address = sba_map_page(dev, sg_page(sglist),
1460*4882a593Smuzhiyun 				sglist->offset, sglist->length, dir, attrs);
1461*4882a593Smuzhiyun 		if (dma_mapping_error(dev, sglist->dma_address))
1462*4882a593Smuzhiyun 			return 0;
1463*4882a593Smuzhiyun 		return 1;
1464*4882a593Smuzhiyun 	}
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun #ifdef ASSERT_PDIR_SANITY
1467*4882a593Smuzhiyun 	spin_lock_irqsave(&ioc->res_lock, flags);
1468*4882a593Smuzhiyun 	if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
1469*4882a593Smuzhiyun 	{
1470*4882a593Smuzhiyun 		sba_dump_sg(ioc, sglist, nents);
1471*4882a593Smuzhiyun 		panic("Check before sba_map_sg_attrs()");
1472*4882a593Smuzhiyun 	}
1473*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ioc->res_lock, flags);
1474*4882a593Smuzhiyun #endif
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	prefetch(ioc->res_hint);
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 	/*
1479*4882a593Smuzhiyun 	** First coalesce the chunks and allocate I/O pdir space
1480*4882a593Smuzhiyun 	**
1481*4882a593Smuzhiyun 	** If this is one DMA stream, we can properly map using the
1482*4882a593Smuzhiyun 	** correct virtual address associated with each DMA page.
1483*4882a593Smuzhiyun 	** w/o this association, we wouldn't have coherent DMA!
1484*4882a593Smuzhiyun 	** Access to the virtual address is what forces a two pass algorithm.
1485*4882a593Smuzhiyun 	*/
1486*4882a593Smuzhiyun 	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
1487*4882a593Smuzhiyun 	if (coalesced < 0) {
1488*4882a593Smuzhiyun 		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
1489*4882a593Smuzhiyun 		return 0;
1490*4882a593Smuzhiyun 	}
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 	/*
1493*4882a593Smuzhiyun 	** Program the I/O Pdir
1494*4882a593Smuzhiyun 	**
1495*4882a593Smuzhiyun 	** map the virtual addresses to the I/O Pdir
1496*4882a593Smuzhiyun 	** o dma_address will contain the pdir index
1497*4882a593Smuzhiyun 	** o dma_len will contain the number of bytes to map
1498*4882a593Smuzhiyun 	** o address contains the virtual address.
1499*4882a593Smuzhiyun 	*/
1500*4882a593Smuzhiyun 	filled = sba_fill_pdir(ioc, sglist, nents);
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun #ifdef ASSERT_PDIR_SANITY
1503*4882a593Smuzhiyun 	spin_lock_irqsave(&ioc->res_lock, flags);
1504*4882a593Smuzhiyun 	if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
1505*4882a593Smuzhiyun 	{
1506*4882a593Smuzhiyun 		sba_dump_sg(ioc, sglist, nents);
1507*4882a593Smuzhiyun 		panic("Check after sba_map_sg_attrs()\n");
1508*4882a593Smuzhiyun 	}
1509*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ioc->res_lock, flags);
1510*4882a593Smuzhiyun #endif
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun 	ASSERT(coalesced == filled);
1513*4882a593Smuzhiyun 	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 	return filled;
1516*4882a593Smuzhiyun }
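/*
** Editor's note: drivers reach sba_map_sg_attrs() only through the generic
** DMA API.  A hypothetical caller looks roughly like this:
*/
#if 0	/* illustrative only, not driver code */
	int n = dma_map_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
	if (!n)
		return -ENOMEM;	/* coalescing or pdir allocation failed */
	/* program the device using sg_dma_address()/sg_dma_len() of the
	   first n entries, then: */
	dma_unmap_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
#endif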
1517*4882a593Smuzhiyun 
1518*4882a593Smuzhiyun /**
1519*4882a593Smuzhiyun  * sba_unmap_sg_attrs - unmap Scatter/Gather list
1520*4882a593Smuzhiyun  * @dev: instance of PCI owned by the driver that's asking.
1521*4882a593Smuzhiyun  * @sglist:  array of buffer/length pairs
1522*4882a593Smuzhiyun  * @nents:  number of entries in list
1523*4882a593Smuzhiyun  * @dir:  R/W or both.
1524*4882a593Smuzhiyun  * @attrs: optional dma attributes
1525*4882a593Smuzhiyun  *
1526*4882a593Smuzhiyun  * See Documentation/core-api/dma-api-howto.rst
1527*4882a593Smuzhiyun  */
1528*4882a593Smuzhiyun static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
1529*4882a593Smuzhiyun 			       int nents, enum dma_data_direction dir,
1530*4882a593Smuzhiyun 			       unsigned long attrs)
1531*4882a593Smuzhiyun {
1532*4882a593Smuzhiyun #ifdef ASSERT_PDIR_SANITY
1533*4882a593Smuzhiyun 	struct ioc *ioc;
1534*4882a593Smuzhiyun 	unsigned long flags;
1535*4882a593Smuzhiyun #endif
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	DBG_RUN_SG("%s() START %d entries,  %p,%x\n",
1538*4882a593Smuzhiyun 		   __func__, nents, sba_sg_address(sglist), sglist->length);
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun #ifdef ASSERT_PDIR_SANITY
1541*4882a593Smuzhiyun 	ioc = GET_IOC(dev);
1542*4882a593Smuzhiyun 	ASSERT(ioc);
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 	spin_lock_irqsave(&ioc->res_lock, flags);
1545*4882a593Smuzhiyun 	sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
1546*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ioc->res_lock, flags);
1547*4882a593Smuzhiyun #endif
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 	while (nents && sglist->dma_length) {
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun 		sba_unmap_page(dev, sglist->dma_address, sglist->dma_length,
1552*4882a593Smuzhiyun 			       dir, attrs);
1553*4882a593Smuzhiyun 		sglist = sg_next(sglist);
1554*4882a593Smuzhiyun 		nents--;
1555*4882a593Smuzhiyun 	}
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__,  nents);
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun #ifdef ASSERT_PDIR_SANITY
1560*4882a593Smuzhiyun 	spin_lock_irqsave(&ioc->res_lock, flags);
1561*4882a593Smuzhiyun 	sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
1562*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ioc->res_lock, flags);
1563*4882a593Smuzhiyun #endif
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun }
1566*4882a593Smuzhiyun 
1567*4882a593Smuzhiyun /**************************************************************
1568*4882a593Smuzhiyun *
1569*4882a593Smuzhiyun *   Initialization and claim
1570*4882a593Smuzhiyun *
1571*4882a593Smuzhiyun ***************************************************************/
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun static void
1574*4882a593Smuzhiyun ioc_iova_init(struct ioc *ioc)
1575*4882a593Smuzhiyun {
1576*4882a593Smuzhiyun 	int tcnfg;
1577*4882a593Smuzhiyun 	int agp_found = 0;
1578*4882a593Smuzhiyun 	struct pci_dev *device = NULL;
1579*4882a593Smuzhiyun #ifdef FULL_VALID_PDIR
1580*4882a593Smuzhiyun 	unsigned long index;
1581*4882a593Smuzhiyun #endif
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun 	/*
1584*4882a593Smuzhiyun 	** Firmware programs the base and size of a "safe IOVA space"
1585*4882a593Smuzhiyun 	** (one that doesn't overlap memory or LMMIO space) in the
1586*4882a593Smuzhiyun 	** IBASE and IMASK registers.
1587*4882a593Smuzhiyun 	*/
1588*4882a593Smuzhiyun 	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
1589*4882a593Smuzhiyun 	ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 	ioc->iov_size = ~ioc->imask + 1;
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun 	DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
1594*4882a593Smuzhiyun 		__func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
1595*4882a593Smuzhiyun 		ioc->iov_size >> 20);
1596*4882a593Smuzhiyun 
1597*4882a593Smuzhiyun 	switch (iovp_size) {
1598*4882a593Smuzhiyun 		case  4*1024: tcnfg = 0; break;
1599*4882a593Smuzhiyun 		case  8*1024: tcnfg = 1; break;
1600*4882a593Smuzhiyun 		case 16*1024: tcnfg = 2; break;
1601*4882a593Smuzhiyun 		case 64*1024: tcnfg = 3; break;
1602*4882a593Smuzhiyun 		default:
1603*4882a593Smuzhiyun 			panic(PFX "Unsupported IOTLB page size %ldK",
1604*4882a593Smuzhiyun 				iovp_size >> 10);
1605*4882a593Smuzhiyun 			break;
1606*4882a593Smuzhiyun 	}
1607*4882a593Smuzhiyun 	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 	ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
1610*4882a593Smuzhiyun 	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1611*4882a593Smuzhiyun 						   get_order(ioc->pdir_size));
1612*4882a593Smuzhiyun 	if (!ioc->pdir_base)
1613*4882a593Smuzhiyun 		panic(PFX "Couldn't allocate I/O Page Table\n");
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 	memset(ioc->pdir_base, 0, ioc->pdir_size);
1616*4882a593Smuzhiyun 
1617*4882a593Smuzhiyun 	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
1618*4882a593Smuzhiyun 		iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
1621*4882a593Smuzhiyun 	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun 	/*
1624*4882a593Smuzhiyun 	** If an AGP device is present, only use half of the IOV space
1625*4882a593Smuzhiyun 	** for PCI DMA.  Unfortunately we can't know ahead of time
1626*4882a593Smuzhiyun 	** whether GART support will actually be used, for now we
1627*4882a593Smuzhiyun 	** can just key on an AGP device found in the system.
1628*4882a593Smuzhiyun 	** We program the next pdir index after we stop w/ a key for
1629*4882a593Smuzhiyun 	** the GART code to handshake on.
1630*4882a593Smuzhiyun 	*/
1631*4882a593Smuzhiyun 	for_each_pci_dev(device)
1632*4882a593Smuzhiyun 		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun 	if (agp_found && reserve_sba_gart) {
1635*4882a593Smuzhiyun 		printk(KERN_INFO PFX "reserving %dMB of IOVA space at 0x%lx for agpgart\n",
1636*4882a593Smuzhiyun 		      ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
1637*4882a593Smuzhiyun 		ioc->pdir_size /= 2;
1638*4882a593Smuzhiyun 		((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
1639*4882a593Smuzhiyun 	}
1640*4882a593Smuzhiyun #ifdef FULL_VALID_PDIR
1641*4882a593Smuzhiyun 	/*
1642*4882a593Smuzhiyun 	** Check to see if the spill page has already been allocated; we don't
1643*4882a593Smuzhiyun 	** need more than one across multiple SBAs.
1644*4882a593Smuzhiyun 	*/
1645*4882a593Smuzhiyun 	if (!prefetch_spill_page) {
1646*4882a593Smuzhiyun 		char *spill_poison = "SBAIOMMU POISON";
1647*4882a593Smuzhiyun 		int poison_size = 16;
1648*4882a593Smuzhiyun 		void *poison_addr, *addr;
1649*4882a593Smuzhiyun 
1650*4882a593Smuzhiyun 		addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
1651*4882a593Smuzhiyun 		if (!addr)
1652*4882a593Smuzhiyun 			panic(PFX "Couldn't allocate PDIR spill page\n");
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun 		poison_addr = addr;
1655*4882a593Smuzhiyun 		for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
1656*4882a593Smuzhiyun 			memcpy(poison_addr, spill_poison, poison_size);
1657*4882a593Smuzhiyun 
1658*4882a593Smuzhiyun 		prefetch_spill_page = virt_to_phys(addr);
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun 		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
1661*4882a593Smuzhiyun 	}
1662*4882a593Smuzhiyun 	/*
1663*4882a593Smuzhiyun 	** Set all the PDIR entries valid w/ the spill page as the target
1664*4882a593Smuzhiyun 	*/
1665*4882a593Smuzhiyun 	for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
1666*4882a593Smuzhiyun 		((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
1667*4882a593Smuzhiyun #endif
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun 	/* Clear I/O TLB of any possible entries */
1670*4882a593Smuzhiyun 	WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
1671*4882a593Smuzhiyun 	READ_REG(ioc->ioc_hpa + IOC_PCOM);
1672*4882a593Smuzhiyun 
1673*4882a593Smuzhiyun 	/* Enable IOVA translation */
1674*4882a593Smuzhiyun 	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1675*4882a593Smuzhiyun 	READ_REG(ioc->ioc_hpa + IOC_IBASE);
1676*4882a593Smuzhiyun }
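/*
** Editor's sketch: the IMASK -> iov_size arithmetic above, worked through.
** If firmware programmed IMASK so that (after OR-ing in the upper bits)
** imask == 0xFFFFFFFFF0000000, then ~imask + 1 == 0x10000000: a 256MB IOVA
** window.  Standalone toy with an assumed example value.
*/
#include <assert.h>

static void toy_iova_window(void)
{
	unsigned long imask = 0xFFFFFFFFF0000000UL;	/* hypothetical */

	assert(~imask + 1 == 0x10000000UL);	/* 256MB window */
}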
1677*4882a593Smuzhiyun 
1678*4882a593Smuzhiyun static void __init
1679*4882a593Smuzhiyun ioc_resource_init(struct ioc *ioc)
1680*4882a593Smuzhiyun {
1681*4882a593Smuzhiyun 	spin_lock_init(&ioc->res_lock);
1682*4882a593Smuzhiyun #if DELAYED_RESOURCE_CNT > 0
1683*4882a593Smuzhiyun 	spin_lock_init(&ioc->saved_lock);
1684*4882a593Smuzhiyun #endif
1685*4882a593Smuzhiyun 
1686*4882a593Smuzhiyun 	/* resource map size dictated by pdir_size */
1687*4882a593Smuzhiyun 	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
1688*4882a593Smuzhiyun 	ioc->res_size >>= 3;  /* convert bit count to byte count */
1689*4882a593Smuzhiyun 	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
1692*4882a593Smuzhiyun 						 get_order(ioc->res_size));
1693*4882a593Smuzhiyun 	if (!ioc->res_map)
1694*4882a593Smuzhiyun 		panic(PFX "Couldn't allocate resource map\n");
1695*4882a593Smuzhiyun 
1696*4882a593Smuzhiyun 	memset(ioc->res_map, 0, ioc->res_size);
1697*4882a593Smuzhiyun 	/* next available IOVP - circular search */
1698*4882a593Smuzhiyun 	ioc->res_hint = (unsigned long *) ioc->res_map;
1699*4882a593Smuzhiyun 
1700*4882a593Smuzhiyun #ifdef ASSERT_PDIR_SANITY
1701*4882a593Smuzhiyun 	/* Mark first bit busy - ie no IOVA 0 */
1702*4882a593Smuzhiyun 	ioc->res_map[0] = 0x1;
1703*4882a593Smuzhiyun 	ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
1704*4882a593Smuzhiyun #endif
1705*4882a593Smuzhiyun #ifdef FULL_VALID_PDIR
1706*4882a593Smuzhiyun 	/* Mark the last resource used so we don't prefetch beyond IOVA space */
1707*4882a593Smuzhiyun 	ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
1708*4882a593Smuzhiyun 	ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
1709*4882a593Smuzhiyun 							      | prefetch_spill_page);
1710*4882a593Smuzhiyun #endif
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun 	DBG_INIT("%s() res_map %x %p\n", __func__,
1713*4882a593Smuzhiyun 		 ioc->res_size, (void *) ioc->res_map);
1714*4882a593Smuzhiyun }
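/*
** Editor's sketch: the resource-map sizing above with concrete numbers,
** assuming a 256MB IOVA window and 4KB IOTLB pages.  One bitmap bit tracks
** one pdir entry (one IOV page).
*/
#include <assert.h>

static void toy_res_sizing(void)
{
	unsigned long iov_size  = 0x10000000UL;		/* 256MB window      */
	unsigned long iovp_size = 0x1000UL;		/* 4KB IOV pages     */
	unsigned long entries   = iov_size / iovp_size;	/* pdir entries      */
	unsigned long map_bytes = entries >> 3;		/* one bit per entry */

	assert(entries == 65536);
	assert(map_bytes == 8192);	/* an 8KB bitmap covers the window */
}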
1715*4882a593Smuzhiyun 
1716*4882a593Smuzhiyun static void __init
1717*4882a593Smuzhiyun ioc_sac_init(struct ioc *ioc)
1718*4882a593Smuzhiyun {
1719*4882a593Smuzhiyun 	struct pci_dev *sac = NULL;
1720*4882a593Smuzhiyun 	struct pci_controller *controller = NULL;
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	/*
1723*4882a593Smuzhiyun 	 * pci_alloc_coherent() must return a DMA address which is
1724*4882a593Smuzhiyun 	 * SAC (single address cycle) addressable, so allocate a
1725*4882a593Smuzhiyun 	 * pseudo-device to enforce that.
1726*4882a593Smuzhiyun 	 */
1727*4882a593Smuzhiyun 	sac = kzalloc(sizeof(*sac), GFP_KERNEL);
1728*4882a593Smuzhiyun 	if (!sac)
1729*4882a593Smuzhiyun 		panic(PFX "Couldn't allocate struct pci_dev");
1730*4882a593Smuzhiyun 
1731*4882a593Smuzhiyun 	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
1732*4882a593Smuzhiyun 	if (!controller)
1733*4882a593Smuzhiyun 		panic(PFX "Couldn't allocate struct pci_controller");
1734*4882a593Smuzhiyun 
1735*4882a593Smuzhiyun 	controller->iommu = ioc;
1736*4882a593Smuzhiyun 	sac->sysdata = controller;
1737*4882a593Smuzhiyun 	sac->dma_mask = 0xFFFFFFFFUL;
1738*4882a593Smuzhiyun 	sac->dev.bus = &pci_bus_type;
1739*4882a593Smuzhiyun 	ioc->sac_only_dev = sac;
1740*4882a593Smuzhiyun }
1741*4882a593Smuzhiyun 
1742*4882a593Smuzhiyun static void __init
1743*4882a593Smuzhiyun ioc_zx1_init(struct ioc *ioc)
1744*4882a593Smuzhiyun {
1745*4882a593Smuzhiyun 	unsigned long rope_config;
1746*4882a593Smuzhiyun 	unsigned int i;
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun 	if (ioc->rev < 0x20)
1749*4882a593Smuzhiyun 		panic(PFX "IOC 2.0 or later required for IOMMU support\n");
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun 	/* 38 bit memory controller + extra bit for range displaced by MMIO */
1752*4882a593Smuzhiyun 	ioc->dma_mask = (0x1UL << 39) - 1;
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun 	/*
1755*4882a593Smuzhiyun 	** Clear ROPE(N)_CONFIG AO bit.
1756*4882a593Smuzhiyun 	** Disables "NT Ordering" (~= !"Relaxed Ordering")
1757*4882a593Smuzhiyun 	** Overrides bit 1 in DMA Hint Sets.
1758*4882a593Smuzhiyun 	** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
1759*4882a593Smuzhiyun 	*/
1760*4882a593Smuzhiyun 	for (i = 0; i < (8*8); i += 8) {
1761*4882a593Smuzhiyun 		rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1762*4882a593Smuzhiyun 		rope_config &= ~IOC_ROPE_AO;
1763*4882a593Smuzhiyun 		WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1764*4882a593Smuzhiyun 	}
1765*4882a593Smuzhiyun }
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun typedef void (initfunc)(struct ioc *);
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun struct ioc_iommu {
1770*4882a593Smuzhiyun 	u32 func_id;
1771*4882a593Smuzhiyun 	char *name;
1772*4882a593Smuzhiyun 	initfunc *init;
1773*4882a593Smuzhiyun };
1774*4882a593Smuzhiyun 
1775*4882a593Smuzhiyun static struct ioc_iommu ioc_iommu_info[] __initdata = {
1776*4882a593Smuzhiyun 	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
1777*4882a593Smuzhiyun 	{ ZX2_IOC_ID, "zx2", NULL },
1778*4882a593Smuzhiyun 	{ SX1000_IOC_ID, "sx1000", NULL },
1779*4882a593Smuzhiyun 	{ SX2000_IOC_ID, "sx2000", NULL },
1780*4882a593Smuzhiyun };
1781*4882a593Smuzhiyun 
1782*4882a593Smuzhiyun static void __init ioc_init(unsigned long hpa, struct ioc *ioc)
1783*4882a593Smuzhiyun {
1784*4882a593Smuzhiyun 	struct ioc_iommu *info;
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun 	ioc->next = ioc_list;
1787*4882a593Smuzhiyun 	ioc_list = ioc;
1788*4882a593Smuzhiyun 
1789*4882a593Smuzhiyun 	ioc->ioc_hpa = ioremap(hpa, 0x1000);
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun 	ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
1792*4882a593Smuzhiyun 	ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
1793*4882a593Smuzhiyun 	ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;	/* conservative */
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun 	for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
1796*4882a593Smuzhiyun 		if (ioc->func_id == info->func_id) {
1797*4882a593Smuzhiyun 			ioc->name = info->name;
1798*4882a593Smuzhiyun 			if (info->init)
1799*4882a593Smuzhiyun 				(info->init)(ioc);
1800*4882a593Smuzhiyun 		}
1801*4882a593Smuzhiyun 	}
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun 	iovp_size = (1 << iovp_shift);
1804*4882a593Smuzhiyun 	iovp_mask = ~(iovp_size - 1);
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
1807*4882a593Smuzhiyun 		PAGE_SIZE >> 10, iovp_size >> 10);
1808*4882a593Smuzhiyun 
1809*4882a593Smuzhiyun 	if (!ioc->name) {
1810*4882a593Smuzhiyun 		ioc->name = kmalloc(24, GFP_KERNEL);
1811*4882a593Smuzhiyun 		if (ioc->name)
1812*4882a593Smuzhiyun 			sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
1813*4882a593Smuzhiyun 				ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
1814*4882a593Smuzhiyun 		else
1815*4882a593Smuzhiyun 			ioc->name = "Unknown";
1816*4882a593Smuzhiyun 	}
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun 	ioc_iova_init(ioc);
1819*4882a593Smuzhiyun 	ioc_resource_init(ioc);
1820*4882a593Smuzhiyun 	ioc_sac_init(ioc);
1821*4882a593Smuzhiyun 
1822*4882a593Smuzhiyun 	printk(KERN_INFO PFX
1823*4882a593Smuzhiyun 		"%s %d.%d HPA 0x%lx IOVA space %dMB at 0x%lx\n",
1824*4882a593Smuzhiyun 		ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
1825*4882a593Smuzhiyun 		hpa, ioc->iov_size >> 20, ioc->ibase);
1826*4882a593Smuzhiyun }
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun /**************************************************************************
1831*4882a593Smuzhiyun **
1832*4882a593Smuzhiyun **   SBA initialization code (HW and SW)
1833*4882a593Smuzhiyun **
1834*4882a593Smuzhiyun **   o identify SBA chip itself
1835*4882a593Smuzhiyun **   o FIXME: initialize DMA hints for reasonable defaults
1836*4882a593Smuzhiyun **
1837*4882a593Smuzhiyun **************************************************************************/
1838*4882a593Smuzhiyun 
1839*4882a593Smuzhiyun #ifdef CONFIG_PROC_FS
1840*4882a593Smuzhiyun static void *
1841*4882a593Smuzhiyun ioc_start(struct seq_file *s, loff_t *pos)
1842*4882a593Smuzhiyun {
1843*4882a593Smuzhiyun 	struct ioc *ioc;
1844*4882a593Smuzhiyun 	loff_t n = *pos;
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun 	for (ioc = ioc_list; ioc; ioc = ioc->next)
1847*4882a593Smuzhiyun 		if (!n--)
1848*4882a593Smuzhiyun 			return ioc;
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	return NULL;
1851*4882a593Smuzhiyun }
1852*4882a593Smuzhiyun 
1853*4882a593Smuzhiyun static void *
1854*4882a593Smuzhiyun ioc_next(struct seq_file *s, void *v, loff_t *pos)
1855*4882a593Smuzhiyun {
1856*4882a593Smuzhiyun 	struct ioc *ioc = v;
1857*4882a593Smuzhiyun 
1858*4882a593Smuzhiyun 	++*pos;
1859*4882a593Smuzhiyun 	return ioc->next;
1860*4882a593Smuzhiyun }
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun static void
1863*4882a593Smuzhiyun ioc_stop(struct seq_file *s, void *v)
1864*4882a593Smuzhiyun {
1865*4882a593Smuzhiyun }
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun static int
1868*4882a593Smuzhiyun ioc_show(struct seq_file *s, void *v)
1869*4882a593Smuzhiyun {
1870*4882a593Smuzhiyun 	struct ioc *ioc = v;
1871*4882a593Smuzhiyun 	unsigned long *res_ptr = (unsigned long *)ioc->res_map;
1872*4882a593Smuzhiyun 	int i, used = 0;
1873*4882a593Smuzhiyun 
1874*4882a593Smuzhiyun 	seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
1875*4882a593Smuzhiyun 		ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
1876*4882a593Smuzhiyun #ifdef CONFIG_NUMA
1877*4882a593Smuzhiyun 	if (ioc->node != NUMA_NO_NODE)
1878*4882a593Smuzhiyun 		seq_printf(s, "NUMA node       : %d\n", ioc->node);
1879*4882a593Smuzhiyun #endif
1880*4882a593Smuzhiyun 	seq_printf(s, "IOVA size       : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
1881*4882a593Smuzhiyun 	seq_printf(s, "IOVA page size  : %ld KB\n", iovp_size/1024);
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 	for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
1884*4882a593Smuzhiyun 		used += hweight64(*res_ptr);
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun 	seq_printf(s, "PDIR size       : %d entries\n", ioc->pdir_size >> 3);
1887*4882a593Smuzhiyun 	seq_printf(s, "PDIR used       : %d entries\n", used);
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun #ifdef PDIR_SEARCH_TIMING
1890*4882a593Smuzhiyun 	{
1891*4882a593Smuzhiyun 		unsigned long i = 0, avg = 0, min, max;
1892*4882a593Smuzhiyun 		min = max = ioc->avg_search[0];
1893*4882a593Smuzhiyun 		for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1894*4882a593Smuzhiyun 			avg += ioc->avg_search[i];
1895*4882a593Smuzhiyun 			if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1896*4882a593Smuzhiyun 			if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1897*4882a593Smuzhiyun 		}
1898*4882a593Smuzhiyun 		avg /= SBA_SEARCH_SAMPLE;
1899*4882a593Smuzhiyun 		seq_printf(s, "Bitmap search   : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
1900*4882a593Smuzhiyun 		           min, avg, max);
1901*4882a593Smuzhiyun 	}
1902*4882a593Smuzhiyun #endif
1903*4882a593Smuzhiyun #ifndef ALLOW_IOV_BYPASS
1904*4882a593Smuzhiyun 	 seq_printf(s, "IOVA bypass disabled\n");
1905*4882a593Smuzhiyun #endif
1906*4882a593Smuzhiyun 	return 0;
1907*4882a593Smuzhiyun }
1908*4882a593Smuzhiyun 
1909*4882a593Smuzhiyun static const struct seq_operations ioc_seq_ops = {
1910*4882a593Smuzhiyun 	.start = ioc_start,
1911*4882a593Smuzhiyun 	.next  = ioc_next,
1912*4882a593Smuzhiyun 	.stop  = ioc_stop,
1913*4882a593Smuzhiyun 	.show  = ioc_show
1914*4882a593Smuzhiyun };
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun static void __init
1917*4882a593Smuzhiyun ioc_proc_init(void)
1918*4882a593Smuzhiyun {
1919*4882a593Smuzhiyun 	struct proc_dir_entry *dir;
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 	dir = proc_mkdir("bus/mckinley", NULL);
1922*4882a593Smuzhiyun 	if (!dir)
1923*4882a593Smuzhiyun 		return;
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun 	proc_create_seq(ioc_list->name, 0, dir, &ioc_seq_ops);
1926*4882a593Smuzhiyun }
1927*4882a593Smuzhiyun #endif
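/*
** Editor's note: with CONFIG_PROC_FS enabled, the statistics emitted by
** ioc_show() above appear under /proc/bus/mckinley/<ioc name>, e.g.
** "cat /proc/bus/mckinley/zx1" on a zx1 system.
*/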
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun static void
1930*4882a593Smuzhiyun sba_connect_bus(struct pci_bus *bus)
1931*4882a593Smuzhiyun {
1932*4882a593Smuzhiyun 	acpi_handle handle, parent;
1933*4882a593Smuzhiyun 	acpi_status status;
1934*4882a593Smuzhiyun 	struct ioc *ioc;
1935*4882a593Smuzhiyun 
1936*4882a593Smuzhiyun 	if (!PCI_CONTROLLER(bus))
1937*4882a593Smuzhiyun 		panic(PFX "no sysdata on bus %d!\n", bus->number);
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun 	if (PCI_CONTROLLER(bus)->iommu)
1940*4882a593Smuzhiyun 		return;
1941*4882a593Smuzhiyun 
1942*4882a593Smuzhiyun 	handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
1943*4882a593Smuzhiyun 	if (!handle)
1944*4882a593Smuzhiyun 		return;
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun 	/*
1947*4882a593Smuzhiyun 	 * The IOC scope encloses PCI root bridges in the ACPI
1948*4882a593Smuzhiyun 	 * namespace, so work our way out until we find an IOC we
1949*4882a593Smuzhiyun 	 * claimed previously.
1950*4882a593Smuzhiyun 	 */
1951*4882a593Smuzhiyun 	do {
1952*4882a593Smuzhiyun 		for (ioc = ioc_list; ioc; ioc = ioc->next)
1953*4882a593Smuzhiyun 			if (ioc->handle == handle) {
1954*4882a593Smuzhiyun 				PCI_CONTROLLER(bus)->iommu = ioc;
1955*4882a593Smuzhiyun 				return;
1956*4882a593Smuzhiyun 			}
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 		status = acpi_get_parent(handle, &parent);
1959*4882a593Smuzhiyun 		handle = parent;
1960*4882a593Smuzhiyun 	} while (ACPI_SUCCESS(status));
1961*4882a593Smuzhiyun 
1962*4882a593Smuzhiyun 	printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
1963*4882a593Smuzhiyun }
1964*4882a593Smuzhiyun 
1965*4882a593Smuzhiyun static void __init
1966*4882a593Smuzhiyun sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
1967*4882a593Smuzhiyun {
1968*4882a593Smuzhiyun #ifdef CONFIG_NUMA
1969*4882a593Smuzhiyun 	unsigned int node;
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 	node = acpi_get_node(handle);
1972*4882a593Smuzhiyun 	if (node != NUMA_NO_NODE && !node_online(node))
1973*4882a593Smuzhiyun 		node = NUMA_NO_NODE;
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	ioc->node = node;
1976*4882a593Smuzhiyun #endif
1977*4882a593Smuzhiyun }
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun static void __init acpi_sba_ioc_add(struct ioc *ioc)
1980*4882a593Smuzhiyun {
1981*4882a593Smuzhiyun 	acpi_handle handle = ioc->handle;
1982*4882a593Smuzhiyun 	acpi_status status;
1983*4882a593Smuzhiyun 	u64 hpa, length;
1984*4882a593Smuzhiyun 	struct acpi_device_info *adi;
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun 	ioc_found = ioc->next;
1987*4882a593Smuzhiyun 	status = hp_acpi_csr_space(handle, &hpa, &length);
1988*4882a593Smuzhiyun 	if (ACPI_FAILURE(status))
1989*4882a593Smuzhiyun 		goto err;
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 	status = acpi_get_object_info(handle, &adi);
1992*4882a593Smuzhiyun 	if (ACPI_FAILURE(status))
1993*4882a593Smuzhiyun 		goto err;
1994*4882a593Smuzhiyun 
1995*4882a593Smuzhiyun 	/*
1996*4882a593Smuzhiyun 	 * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
1997*4882a593Smuzhiyun 	 * root bridges, and its CSR space includes the IOC function.
1998*4882a593Smuzhiyun 	 */
1999*4882a593Smuzhiyun 	if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
2000*4882a593Smuzhiyun 		hpa += ZX1_IOC_OFFSET;
2001*4882a593Smuzhiyun 		/* zx1 based systems default to kernel page size iommu pages */
2002*4882a593Smuzhiyun 		if (!iovp_shift)
2003*4882a593Smuzhiyun 			iovp_shift = min(PAGE_SHIFT, 16);
2004*4882a593Smuzhiyun 	}
2005*4882a593Smuzhiyun 	kfree(adi);
2006*4882a593Smuzhiyun 
2007*4882a593Smuzhiyun 	/*
2008*4882a593Smuzhiyun 	 * default anything not caught above or specified on cmdline to 4k
2009*4882a593Smuzhiyun 	 * iommu page size
2010*4882a593Smuzhiyun 	 */
2011*4882a593Smuzhiyun 	if (!iovp_shift)
2012*4882a593Smuzhiyun 		iovp_shift = 12;
2013*4882a593Smuzhiyun 
2014*4882a593Smuzhiyun 	ioc_init(hpa, ioc);
2015*4882a593Smuzhiyun 	/* setup NUMA node association */
2016*4882a593Smuzhiyun 	sba_map_ioc_to_node(ioc, handle);
2017*4882a593Smuzhiyun 	return;
2018*4882a593Smuzhiyun 
2019*4882a593Smuzhiyun  err:
2020*4882a593Smuzhiyun 	kfree(ioc);
2021*4882a593Smuzhiyun }
2022*4882a593Smuzhiyun 
2023*4882a593Smuzhiyun static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
2024*4882a593Smuzhiyun 	{"HWP0001", 0},
2025*4882a593Smuzhiyun 	{"HWP0004", 0},
2026*4882a593Smuzhiyun 	{"", 0},
2027*4882a593Smuzhiyun };
2028*4882a593Smuzhiyun 
2029*4882a593Smuzhiyun static int acpi_sba_ioc_attach(struct acpi_device *device,
2030*4882a593Smuzhiyun 			       const struct acpi_device_id *not_used)
2031*4882a593Smuzhiyun {
2032*4882a593Smuzhiyun 	struct ioc *ioc;
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun 	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2035*4882a593Smuzhiyun 	if (!ioc)
2036*4882a593Smuzhiyun 		return -ENOMEM;
2037*4882a593Smuzhiyun 
2038*4882a593Smuzhiyun 	ioc->next = ioc_found;
2039*4882a593Smuzhiyun 	ioc_found = ioc;
2040*4882a593Smuzhiyun 	ioc->handle = device->handle;
2041*4882a593Smuzhiyun 	return 1;
2042*4882a593Smuzhiyun }
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 
2045*4882a593Smuzhiyun static struct acpi_scan_handler acpi_sba_ioc_handler = {
2046*4882a593Smuzhiyun 	.ids	= hp_ioc_iommu_device_ids,
2047*4882a593Smuzhiyun 	.attach	= acpi_sba_ioc_attach,
2048*4882a593Smuzhiyun };
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun static int __init acpi_sba_ioc_init_acpi(void)
2051*4882a593Smuzhiyun {
2052*4882a593Smuzhiyun 	return acpi_scan_add_handler(&acpi_sba_ioc_handler);
2053*4882a593Smuzhiyun }
2054*4882a593Smuzhiyun /* This has to run before acpi_scan_init(). */
2055*4882a593Smuzhiyun arch_initcall(acpi_sba_ioc_init_acpi);
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun static int sba_dma_supported(struct device *dev, u64 mask)
2058*4882a593Smuzhiyun {
2059*4882a593Smuzhiyun 	/* make sure it's at least 32bit capable */
2060*4882a593Smuzhiyun 	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
2061*4882a593Smuzhiyun }
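/*
** Editor's sketch: the 32-bit capability test above, on concrete masks.
** Standalone toy (assumes 64-bit long).
*/
#include <assert.h>

static void toy_mask_check(void)
{
	assert((0xFFFFFFFFUL         & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);	/* 32-bit: ok */
	assert((0xFFFFFFFFFFFFFFFFUL & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);	/* 64-bit: ok */
	assert((0x0000000000FFFFFFUL & 0xFFFFFFFFUL) != 0xFFFFFFFFUL);	/* 24-bit: rejected */
}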
2062*4882a593Smuzhiyun 
2063*4882a593Smuzhiyun static const struct dma_map_ops sba_dma_ops = {
2064*4882a593Smuzhiyun 	.alloc			= sba_alloc_coherent,
2065*4882a593Smuzhiyun 	.free			= sba_free_coherent,
2066*4882a593Smuzhiyun 	.map_page		= sba_map_page,
2067*4882a593Smuzhiyun 	.unmap_page		= sba_unmap_page,
2068*4882a593Smuzhiyun 	.map_sg			= sba_map_sg_attrs,
2069*4882a593Smuzhiyun 	.unmap_sg		= sba_unmap_sg_attrs,
2070*4882a593Smuzhiyun 	.dma_supported		= sba_dma_supported,
2071*4882a593Smuzhiyun 	.mmap			= dma_common_mmap,
2072*4882a593Smuzhiyun 	.get_sgtable		= dma_common_get_sgtable,
2073*4882a593Smuzhiyun 	.alloc_pages		= dma_common_alloc_pages,
2074*4882a593Smuzhiyun 	.free_pages		= dma_common_free_pages,
2075*4882a593Smuzhiyun };
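/*
** Editor's note: once sba_init() below assigns this table to the global
** dma_ops, every dma_alloc_coherent()/dma_map_page()/dma_map_sg() call on
** the platform dispatches through the sba_* entry points above.
*/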
2076*4882a593Smuzhiyun 
2077*4882a593Smuzhiyun static int __init
2078*4882a593Smuzhiyun sba_init(void)
2079*4882a593Smuzhiyun {
2080*4882a593Smuzhiyun 	/*
2081*4882a593Smuzhiyun 	 * If we are booting a kdump kernel, the sba_iommu will cause devices
2082*4882a593Smuzhiyun 	 * that were not shutdown properly to MCA as soon as they are turned
2083*4882a593Smuzhiyun 	 * back on.  Our only option for a successful kdump kernel boot is to
2084*4882a593Smuzhiyun 	 * use swiotlb.
2085*4882a593Smuzhiyun 	 */
2086*4882a593Smuzhiyun 	if (is_kdump_kernel())
2087*4882a593Smuzhiyun 		return 0;
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun 	/*
2090*4882a593Smuzhiyun 	 * ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
2091*4882a593Smuzhiyun 	 * routine, but that only happens if acpi_scan_init() has already run.
2092*4882a593Smuzhiyun 	 */
2093*4882a593Smuzhiyun 	while (ioc_found)
2094*4882a593Smuzhiyun 		acpi_sba_ioc_add(ioc_found);
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun 	if (!ioc_list)
2097*4882a593Smuzhiyun 		return 0;
2098*4882a593Smuzhiyun 
2099*4882a593Smuzhiyun 	{
2100*4882a593Smuzhiyun 		struct pci_bus *b = NULL;
2101*4882a593Smuzhiyun 		while ((b = pci_find_next_bus(b)) != NULL)
2102*4882a593Smuzhiyun 			sba_connect_bus(b);
2103*4882a593Smuzhiyun 	}
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun 	/* no need for swiotlb with the iommu */
2106*4882a593Smuzhiyun 	swiotlb_exit();
2107*4882a593Smuzhiyun 	dma_ops = &sba_dma_ops;
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun #ifdef CONFIG_PROC_FS
2110*4882a593Smuzhiyun 	ioc_proc_init();
2111*4882a593Smuzhiyun #endif
2112*4882a593Smuzhiyun 	return 0;
2113*4882a593Smuzhiyun }
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
2116*4882a593Smuzhiyun 
2117*4882a593Smuzhiyun static int __init
2118*4882a593Smuzhiyun nosbagart(char *str)
2119*4882a593Smuzhiyun {
2120*4882a593Smuzhiyun 	reserve_sba_gart = 0;
2121*4882a593Smuzhiyun 	return 1;
2122*4882a593Smuzhiyun }
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun __setup("nosbagart", nosbagart);
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun static int __init
2127*4882a593Smuzhiyun sba_page_override(char *str)
2128*4882a593Smuzhiyun {
2129*4882a593Smuzhiyun 	unsigned long page_size;
2130*4882a593Smuzhiyun 
2131*4882a593Smuzhiyun 	page_size = memparse(str, &str);
2132*4882a593Smuzhiyun 	switch (page_size) {
2133*4882a593Smuzhiyun 		case 4096:
2134*4882a593Smuzhiyun 		case 8192:
2135*4882a593Smuzhiyun 		case 16384:
2136*4882a593Smuzhiyun 		case 65536:
2137*4882a593Smuzhiyun 			iovp_shift = ffs(page_size) - 1;
2138*4882a593Smuzhiyun 			break;
2139*4882a593Smuzhiyun 		default:
2140*4882a593Smuzhiyun 			printk(KERN_ERR "%s: unknown/unsupported iommu page size %ld\n",
2141*4882a593Smuzhiyun 			       __func__, page_size);
2142*4882a593Smuzhiyun 	}
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun 	return 1;
2145*4882a593Smuzhiyun }
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun __setup("sbapagesize=", sba_page_override);
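/*
** Editor's note: the two handlers above are boot-parameter hooks.  Booting
** with "nosbagart sbapagesize=64k" skips the AGP GART reservation and
** selects 64KB IOTLB pages (ffs(65536) - 1 == 16, so iovp_shift = 16).
*/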
2148