// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IOMMU implementation for Cell Broadband Processor Architecture
 *
 * (C) Copyright IBM Corporation 2006-2008
 *
 * Author: Jeremy Kerr <jk@ozlabs.org>
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/slab.h>
#include <linux/memblock.h>

#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/udbg.h>
#include <asm/firmware.h>
#include <asm/cell-regs.h>

#include "cell.h"
#include "interrupt.h"

/* Define CELL_IOMMU_REAL_UNMAP to actually unmap unused pages
 * instead of leaving them mapped to some dummy page. This can be
 * enabled once the appropriate workarounds for spider bugs have
 * been implemented.
 */
#define CELL_IOMMU_REAL_UNMAP

/* Define CELL_IOMMU_STRICT_PROTECTION to enforce protection of
 * IO PTEs based on the transfer direction. That can be enabled
 * once spider-net has been fixed to pass the correct direction
 * to the DMA mapping functions
 */
#define CELL_IOMMU_STRICT_PROTECTION


#define NR_IOMMUS			2

/* IOC mmap registers */
#define IOC_Reg_Size			0x2000

#define IOC_IOPT_CacheInvd		0x908
#define IOC_IOPT_CacheInvd_NE_Mask	0xffe0000000000000ul
#define IOC_IOPT_CacheInvd_IOPTE_Mask	0x000003fffffffff8ul
#define IOC_IOPT_CacheInvd_Busy		0x0000000000000001ul

#define IOC_IOST_Origin			0x918
#define IOC_IOST_Origin_E		0x8000000000000000ul
#define IOC_IOST_Origin_HW		0x0000000000000800ul
#define IOC_IOST_Origin_HL		0x0000000000000400ul

#define IOC_IO_ExcpStat			0x920
#define IOC_IO_ExcpStat_V		0x8000000000000000ul
#define IOC_IO_ExcpStat_SPF_Mask	0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_S		0x6000000000000000ul
#define IOC_IO_ExcpStat_SPF_P		0x2000000000000000ul
#define IOC_IO_ExcpStat_ADDR_Mask	0x00000007fffff000ul
#define IOC_IO_ExcpStat_RW_Mask		0x0000000000000800ul
#define IOC_IO_ExcpStat_IOID_Mask	0x00000000000007fful

#define IOC_IO_ExcpMask			0x928
#define IOC_IO_ExcpMask_SFE		0x4000000000000000ul
#define IOC_IO_ExcpMask_PFE		0x2000000000000000ul

#define IOC_IOCmd_Offset		0x1000

#define IOC_IOCmd_Cfg			0xc00
#define IOC_IOCmd_Cfg_TE		0x0000800000000000ul


/* Segment table entries */
#define IOSTE_V			0x8000000000000000ul /* valid */
#define IOSTE_H			0x4000000000000000ul /* cache hint */
#define IOSTE_PT_Base_RPN_Mask	0x3ffffffffffff000ul /* base RPN of IOPT */
#define IOSTE_NPPT_Mask		0x0000000000000fe0ul /* no. pages in IOPT */
#define IOSTE_PS_Mask		0x0000000000000007ul /* page size */
#define IOSTE_PS_4K		0x0000000000000001ul /* - 4kB  */
#define IOSTE_PS_64K		0x0000000000000003ul /* - 64kB */
#define IOSTE_PS_1M		0x0000000000000005ul /* - 1MB  */
#define IOSTE_PS_16M		0x0000000000000007ul /* - 16MB */


/* IOMMU sizing */
#define IO_SEGMENT_SHIFT	28
#define IO_PAGENO_BITS(shift)	(IO_SEGMENT_SHIFT - (shift))
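/*
 * Segments cover 1ul << IO_SEGMENT_SHIFT (256MB) of DMA space each, and
 * IO_PAGENO_BITS(shift) is the number of IO-page-number bits within one
 * segment for a given IO page shift (e.g. 4 bits, i.e. 16 pages, when
 * using 16MB IO pages).
 */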

/* The high bit needs to be set on every DMA address */
#define SPIDER_DMA_OFFSET	0x80000000ul

struct iommu_window {
	struct list_head list;
	struct cbe_iommu *iommu;
	unsigned long offset;
	unsigned long size;
	unsigned int ioid;
	struct iommu_table table;
};

#define NAMESIZE 8
struct cbe_iommu {
	int nid;
	char name[NAMESIZE];
	void __iomem *xlate_regs;
	void __iomem *cmd_regs;
	unsigned long *stab;
	unsigned long *ptab;
	void *pad_page;
	struct list_head windows;
};

/* Static array of iommus, one per node
 *   each contains a list of windows, keyed from dma_window property
 *   - on bus setup, look for a matching window, or create one
 *   - on dev setup, assign iommu_table ptr
 */
static struct cbe_iommu iommus[NR_IOMMUS];
static int cbe_nr_iommus;

static void invalidate_tce_cache(struct cbe_iommu *iommu, unsigned long *pte,
		long n_ptes)
{
	u64 __iomem *reg;
	u64 val;
	long n;

	reg = iommu->xlate_regs + IOC_IOPT_CacheInvd;

	while (n_ptes > 0) {
		/* we can invalidate up to 1 << 11 PTEs at once */
		n = min(n_ptes, 1l << 11);
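		/*
		 * The invalidation command encodes the entry count in the
		 * NE field (bits 63:53, cf. IOC_IOPT_CacheInvd_NE_Mask) and
		 * the physical address of the first IOPTE in the IOPTE
		 * field, and is kicked off by setting the Busy bit, which
		 * the hardware clears once the invalidation has completed.
		 */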
		val = (((n /*- 1*/) << 53) & IOC_IOPT_CacheInvd_NE_Mask)
			| (__pa(pte) & IOC_IOPT_CacheInvd_IOPTE_Mask)
			| IOC_IOPT_CacheInvd_Busy;

		out_be64(reg, val);
		while (in_be64(reg) & IOC_IOPT_CacheInvd_Busy)
			;

		n_ptes -= n;
		pte += n;
	}
}

static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
		unsigned long uaddr, enum dma_data_direction direction,
		unsigned long attrs)
{
	int i;
	unsigned long *io_pte, base_pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	/* implementing proper protection causes problems with the spidernet
	 * driver - check mapping directions later, but allow read & write by
	 * default for now. */
#ifdef CELL_IOMMU_STRICT_PROTECTION
	/* to avoid referencing a global, we use a trick here to set up the
	 * protection bits. "prot" is set up to be 3 fields of 4 bits appended
	 * together, one for each of the 3 supported direction values. It is
	 * then shifted left so that the field matching the desired direction
	 * lands on the appropriate bits, and other bits are masked out.
	 */
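	/*
	 * Concretely: 0xc48 is 0b1100_0100_1000, i.e. [W|R] for
	 * DMA_BIDIRECTIONAL (0) in bits 11:10, [R] for DMA_TO_DEVICE (1) in
	 * bits 7:6 and [W] for DMA_FROM_DEVICE (2) in bits 3:2. Shifting by
	 * 52 + 4 * direction moves the selected pair onto the
	 * CBE_IOPTE_PP_W/CBE_IOPTE_PP_R bits and the mask drops the rest.
	 */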
	const unsigned long prot = 0xc48;
	base_pte =
		((prot << (52 + 4 * direction)) &
		 (CBE_IOPTE_PP_W | CBE_IOPTE_PP_R)) |
		CBE_IOPTE_M | CBE_IOPTE_SO_RW |
		(window->ioid & CBE_IOPTE_IOID_Mask);
#else
	base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
		CBE_IOPTE_SO_RW | (window->ioid & CBE_IOPTE_IOID_Mask);
#endif
	if (unlikely(attrs & DMA_ATTR_WEAK_ORDERING))
		base_pte &= ~CBE_IOPTE_SO_RW;

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
		io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);

	pr_debug("tce_build_cell(index=%lx,n=%lx,dir=%d,base_pte=%lx)\n",
		 index, npages, direction, base_pte);
	return 0;
}

static void tce_free_cell(struct iommu_table *tbl, long index, long npages)
{

	int i;
	unsigned long *io_pte, pte;
	struct iommu_window *window =
		container_of(tbl, struct iommu_window, table);

	pr_debug("tce_free_cell(index=%lx,n=%lx)\n", index, npages);

#ifdef CELL_IOMMU_REAL_UNMAP
	pte = 0;
#else
	/* spider bridge does PCI reads after freeing - insert a mapping
	 * to a scratch page instead of an invalid entry */
	pte = CBE_IOPTE_PP_R | CBE_IOPTE_M | CBE_IOPTE_SO_RW |
		__pa(window->iommu->pad_page) |
		(window->ioid & CBE_IOPTE_IOID_Mask);
#endif

	io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);

	for (i = 0; i < npages; i++)
		io_pte[i] = pte;

	mb();

	invalidate_tce_cache(window->iommu, io_pte, npages);
}

static irqreturn_t ioc_interrupt(int irq, void *data)
{
	unsigned long stat, spf;
	struct cbe_iommu *iommu = data;

	stat = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	spf = stat & IOC_IO_ExcpStat_SPF_Mask;

	/* Might want to rate limit it */
	printk(KERN_ERR "iommu: DMA exception 0x%016lx\n", stat);
	printk(KERN_ERR " V=%d, SPF=[%c%c], RW=%s, IOID=0x%04x\n",
	       !!(stat & IOC_IO_ExcpStat_V),
	       (spf == IOC_IO_ExcpStat_SPF_S) ? 'S' : ' ',
	       (spf == IOC_IO_ExcpStat_SPF_P) ? 'P' : ' ',
	       (stat & IOC_IO_ExcpStat_RW_Mask) ? "Read" : "Write",
	       (unsigned int)(stat & IOC_IO_ExcpStat_IOID_Mask));
	printk(KERN_ERR " page=0x%016lx\n",
	       stat & IOC_IO_ExcpStat_ADDR_Mask);

	/* clear interrupt */
	stat &= ~IOC_IO_ExcpStat_V;
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat, stat);

	return IRQ_HANDLED;
}

static int cell_iommu_find_ioc(int nid, unsigned long *base)
{
	struct device_node *np;
	struct resource r;

	*base = 0;

	/* First look for new style /be nodes */
	for_each_node_by_name(np, "ioc") {
		if (of_node_to_nid(np) != nid)
			continue;
		if (of_address_to_resource(np, 0, &r)) {
			printk(KERN_ERR "iommu: can't get address for %pOF\n",
			       np);
			continue;
		}
		*base = r.start;
		of_node_put(np);
		return 0;
	}

	/* Ok, let's try the old way */
	for_each_node_by_type(np, "cpu") {
		const unsigned int *nidp;
		const unsigned long *tmp;

		nidp = of_get_property(np, "node-id", NULL);
		if (nidp && *nidp == nid) {
			tmp = of_get_property(np, "ioc-translation", NULL);
			if (tmp) {
				*base = *tmp;
				of_node_put(np);
				return 0;
			}
		}
	}

	return -ENODEV;
}

static void cell_iommu_setup_stab(struct cbe_iommu *iommu,
			unsigned long dbase, unsigned long dsize,
			unsigned long fbase, unsigned long fsize)
{
	struct page *page;
	unsigned long segments, stab_size;

	segments = max(dbase + dsize, fbase + fsize) >> IO_SEGMENT_SHIFT;

	pr_debug("%s: iommu[%d]: segments: %lu\n",
			__func__, iommu->nid, segments);

	/* set up the segment table */
	stab_size = segments * sizeof(unsigned long);
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(stab_size));
	BUG_ON(!page);
	iommu->stab = page_address(page);
	memset(iommu->stab, 0, stab_size);
}

static unsigned long *cell_iommu_alloc_ptab(struct cbe_iommu *iommu,
		unsigned long base, unsigned long size, unsigned long gap_base,
		unsigned long gap_size, unsigned long page_shift)
{
	struct page *page;
	int i;
	unsigned long reg, segments, pages_per_segment, ptab_size,
		      n_pte_pages, start_seg, *ptab;

	start_seg = base >> IO_SEGMENT_SHIFT;
	segments = size >> IO_SEGMENT_SHIFT;
	pages_per_segment = 1ull << IO_PAGENO_BITS(page_shift);
	/* PTEs for each segment must start on a 4K boundary */
	pages_per_segment = max(pages_per_segment,
				(1 << 12) / sizeof(unsigned long));
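	/*
	 * i.e. each segment's chunk of the page table is rounded up to at
	 * least one 4K page worth of 8-byte IOPTEs (512 entries), even for
	 * large IO page sizes that would otherwise need fewer entries.
	 */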

	ptab_size = segments * pages_per_segment * sizeof(unsigned long);
	pr_debug("%s: iommu[%d]: ptab_size: %lu, order: %d\n", __func__,
			iommu->nid, ptab_size, get_order(ptab_size));
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, get_order(ptab_size));
	BUG_ON(!page);

	ptab = page_address(page);
	memset(ptab, 0, ptab_size);

	/* number of 4K pages needed for a page table */
	n_pte_pages = (pages_per_segment * sizeof(unsigned long)) >> 12;

	pr_debug("%s: iommu[%d]: stab at %p, ptab at %p, n_pte_pages: %lu\n",
			__func__, iommu->nid, iommu->stab, ptab,
			n_pte_pages);

	/* initialise the STEs */
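	/*
	 * Each STE carries the number of 4K IOPT pages for its segment,
	 * minus one, in the NPPT field (bits 11:5, cf. IOSTE_NPPT_Mask),
	 * plus the IO page size set below and, in the loop further down,
	 * the physical address of that segment's page table chunk.
	 */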
	reg = IOSTE_V | ((n_pte_pages - 1) << 5);

	switch (page_shift) {
	case 12: reg |= IOSTE_PS_4K;  break;
	case 16: reg |= IOSTE_PS_64K; break;
	case 20: reg |= IOSTE_PS_1M;  break;
	case 24: reg |= IOSTE_PS_16M; break;
	default: BUG();
	}

	gap_base = gap_base >> IO_SEGMENT_SHIFT;
	gap_size = gap_size >> IO_SEGMENT_SHIFT;

	pr_debug("Setting up IOMMU stab:\n");
	for (i = start_seg; i < (start_seg + segments); i++) {
		if (i >= gap_base && i < (gap_base + gap_size)) {
			pr_debug("\toverlap at %d, skipping\n", i);
			continue;
		}
		iommu->stab[i] = reg | (__pa(ptab) + (n_pte_pages << 12) *
					(i - start_seg));
		pr_debug("\t[%d] 0x%016lx\n", i, iommu->stab[i]);
	}

	return ptab;
}

static void cell_iommu_enable_hardware(struct cbe_iommu *iommu)
{
	int ret;
	unsigned long reg, xlate_base;
	unsigned int virq;

	if (cell_iommu_find_ioc(iommu->nid, &xlate_base))
		panic("%s: missing IOC register mappings for node %d\n",
		      __func__, iommu->nid);

	iommu->xlate_regs = ioremap(xlate_base, IOC_Reg_Size);
	iommu->cmd_regs = iommu->xlate_regs + IOC_IOCmd_Offset;

	/* ensure that the STEs have updated */
	mb();

	/* setup interrupts for the iommu. */
	reg = in_be64(iommu->xlate_regs + IOC_IO_ExcpStat);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpStat,
			reg & ~IOC_IO_ExcpStat_V);
	out_be64(iommu->xlate_regs + IOC_IO_ExcpMask,
			IOC_IO_ExcpMask_PFE | IOC_IO_ExcpMask_SFE);

	virq = irq_create_mapping(NULL,
			IIC_IRQ_IOEX_ATI | (iommu->nid << IIC_IRQ_NODE_SHIFT));
	BUG_ON(!virq);

	ret = request_irq(virq, ioc_interrupt, 0, iommu->name, iommu);
	BUG_ON(ret);

	/* set the IOC segment table origin register (and turn on the iommu) */
	reg = IOC_IOST_Origin_E | __pa(iommu->stab) | IOC_IOST_Origin_HW;
	out_be64(iommu->xlate_regs + IOC_IOST_Origin, reg);
	in_be64(iommu->xlate_regs + IOC_IOST_Origin);

	/* turn on IO translation */
	reg = in_be64(iommu->cmd_regs + IOC_IOCmd_Cfg) | IOC_IOCmd_Cfg_TE;
	out_be64(iommu->cmd_regs + IOC_IOCmd_Cfg, reg);
}

static void cell_iommu_setup_hardware(struct cbe_iommu *iommu,
	unsigned long base, unsigned long size)
{
	cell_iommu_setup_stab(iommu, base, size, 0, 0);
	iommu->ptab = cell_iommu_alloc_ptab(iommu, base, size, 0, 0,
					    IOMMU_PAGE_SHIFT_4K);
	cell_iommu_enable_hardware(iommu);
}

#if 0 /* Unused for now */
static struct iommu_window *find_window(struct cbe_iommu *iommu,
		unsigned long offset, unsigned long size)
{
	struct iommu_window *window;

	/* todo: check for overlapping (but not equal) windows */

	list_for_each_entry(window, &(iommu->windows), list) {
		if (window->offset == offset && window->size == size)
			return window;
	}

	return NULL;
}
#endif

static inline u32 cell_iommu_get_ioid(struct device_node *np)
{
	const u32 *ioid;

	ioid = of_get_property(np, "ioid", NULL);
	if (ioid == NULL) {
		printk(KERN_WARNING "iommu: missing ioid for %pOF using 0\n",
		       np);
		return 0;
	}

	return *ioid;
}

static struct iommu_table_ops cell_iommu_ops = {
	.set = tce_build_cell,
	.clear = tce_free_cell
};

static struct iommu_window * __init
cell_iommu_setup_window(struct cbe_iommu *iommu, struct device_node *np,
			unsigned long offset, unsigned long size,
			unsigned long pte_offset)
{
	struct iommu_window *window;
	struct page *page;
	u32 ioid;

	ioid = cell_iommu_get_ioid(np);

	window = kzalloc_node(sizeof(*window), GFP_KERNEL, iommu->nid);
	BUG_ON(window == NULL);

	window->offset = offset;
	window->size = size;
	window->ioid = ioid;
	window->iommu = iommu;

	window->table.it_blocksize = 16;
	window->table.it_base = (unsigned long)iommu->ptab;
	window->table.it_index = iommu->nid;
	window->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
	window->table.it_offset =
		(offset >> window->table.it_page_shift) + pte_offset;
	window->table.it_size = size >> window->table.it_page_shift;
	window->table.it_ops = &cell_iommu_ops;

	iommu_init_table(&window->table, iommu->nid, 0, 0);

	pr_debug("\tioid      %d\n", window->ioid);
	pr_debug("\tblocksize %ld\n", window->table.it_blocksize);
	pr_debug("\tbase      0x%016lx\n", window->table.it_base);
	pr_debug("\toffset    0x%lx\n", window->table.it_offset);
	pr_debug("\tsize      %ld\n", window->table.it_size);

	list_add(&window->list, &iommu->windows);

	if (offset != 0)
		return window;

	/* We need to map and reserve the first IOMMU page since it's used
	 * by the spider workaround. In theory, we only need to do that when
	 * running on spider but it doesn't really matter.
	 *
	 * This code also assumes that we have a window that starts at 0,
	 * which is the case on all spider based blades.
	 */
	page = alloc_pages_node(iommu->nid, GFP_KERNEL, 0);
	BUG_ON(!page);
	iommu->pad_page = page_address(page);
	clear_page(iommu->pad_page);

	__set_bit(0, window->table.it_map);
	tce_build_cell(&window->table, window->table.it_offset, 1,
		       (unsigned long)iommu->pad_page, DMA_TO_DEVICE, 0);

	return window;
}

static struct cbe_iommu *cell_iommu_for_node(int nid)
{
	int i;

	for (i = 0; i < cbe_nr_iommus; i++)
		if (iommus[i].nid == nid)
			return &iommus[i];
	return NULL;
}

static unsigned long cell_dma_nommu_offset;

static unsigned long dma_iommu_fixed_base;
static bool cell_iommu_enabled;

/* iommu_fixed_is_weak is set if booted with iommu_fixed=weak */
bool iommu_fixed_is_weak;

static struct iommu_table *cell_get_iommu_table(struct device *dev)
{
	struct iommu_window *window;
	struct cbe_iommu *iommu;

	/* Current implementation uses the first window available in that
	 * node's iommu. We -might- do something smarter later though it may
	 * never be necessary
	 */
	iommu = cell_iommu_for_node(dev_to_node(dev));
	if (iommu == NULL || list_empty(&iommu->windows)) {
		dev_err(dev, "iommu: missing iommu for %pOF (node %d)\n",
			dev->of_node, dev_to_node(dev));
		return NULL;
	}
	window = list_entry(iommu->windows.next, struct iommu_window, list);

	return &window->table;
}

static u64 cell_iommu_get_fixed_address(struct device *dev);

static void cell_dma_dev_setup(struct device *dev)
{
	if (cell_iommu_enabled) {
		u64 addr = cell_iommu_get_fixed_address(dev);

		if (addr != OF_BAD_ADDR)
			dev->archdata.dma_offset = addr + dma_iommu_fixed_base;
		set_iommu_table_base(dev, cell_get_iommu_table(dev));
	} else {
		dev->archdata.dma_offset = cell_dma_nommu_offset;
	}
}

static void cell_pci_dma_dev_setup(struct pci_dev *dev)
{
	cell_dma_dev_setup(&dev->dev);
}

static int cell_of_bus_notify(struct notifier_block *nb, unsigned long action,
			      void *data)
{
	struct device *dev = data;

	/* We are only interested in device addition */
	if (action != BUS_NOTIFY_ADD_DEVICE)
		return 0;

	if (cell_iommu_enabled)
		dev->dma_ops = &dma_iommu_ops;
	cell_dma_dev_setup(dev);
	return 0;
}

static struct notifier_block cell_of_bus_notifier = {
	.notifier_call = cell_of_bus_notify
};

static int __init cell_iommu_get_window(struct device_node *np,
			unsigned long *base,
			unsigned long *size)
{
	const __be32 *dma_window;
	unsigned long index;

	/* Use ibm,dma-window if available, else, hard code ! */
	dma_window = of_get_property(np, "ibm,dma-window", NULL);
	if (dma_window == NULL) {
		*base = 0;
		*size = 0x80000000u;
		return -ENODEV;
	}

	of_parse_dma_window(np, dma_window, &index, base, size);
	return 0;
}

static struct cbe_iommu * __init cell_iommu_alloc(struct device_node *np)
{
	struct cbe_iommu *iommu;
	int nid, i;

	/* Get node ID */
	nid = of_node_to_nid(np);
	if (nid < 0) {
		printk(KERN_ERR "iommu: failed to get node for %pOF\n",
		       np);
		return NULL;
	}
	pr_debug("iommu: setting up iommu for node %d (%pOF)\n",
		 nid, np);

	/* XXX todo: If we can have multiple windows on the same IOMMU, which
	 * isn't the case today, we probably want here to check whether the
	 * iommu for that node is already setup.
	 * However, there might be an issue with getting the size right so let's
	 * ignore that for now. We might want to completely get rid of the
	 * multiple window support since the cell iommu supports per-page ioids
	 */

	if (cbe_nr_iommus >= NR_IOMMUS) {
		printk(KERN_ERR "iommu: too many IOMMUs detected ! (%pOF)\n",
		       np);
		return NULL;
	}

	/* Init base fields */
	i = cbe_nr_iommus++;
	iommu = &iommus[i];
	iommu->stab = NULL;
	iommu->nid = nid;
	snprintf(iommu->name, sizeof(iommu->name), "iommu%d", i);
	INIT_LIST_HEAD(&iommu->windows);

	return iommu;
}

static void __init cell_iommu_init_one(struct device_node *np,
				       unsigned long offset)
{
	struct cbe_iommu *iommu;
	unsigned long base, size;

	iommu = cell_iommu_alloc(np);
	if (!iommu)
		return;

	/* Obtain a window for it */
	cell_iommu_get_window(np, &base, &size);

	pr_debug("\ttranslating window 0x%lx...0x%lx\n",
		 base, base + size - 1);

	/* Initialize the hardware */
	cell_iommu_setup_hardware(iommu, base, size);

	/* Setup the iommu_table */
	cell_iommu_setup_window(iommu, np, base, size,
				offset >> IOMMU_PAGE_SHIFT_4K);
}

static void __init cell_disable_iommus(void)
{
	int node;
	unsigned long base, val;
	void __iomem *xregs, *cregs;

	/* Make sure IOC translation is disabled on all nodes */
	for_each_online_node(node) {
		if (cell_iommu_find_ioc(node, &base))
			continue;
		xregs = ioremap(base, IOC_Reg_Size);
		if (xregs == NULL)
			continue;
		cregs = xregs + IOC_IOCmd_Offset;

		pr_debug("iommu: cleaning up iommu on node %d\n", node);

		out_be64(xregs + IOC_IOST_Origin, 0);
		(void)in_be64(xregs + IOC_IOST_Origin);
		val = in_be64(cregs + IOC_IOCmd_Cfg);
		val &= ~IOC_IOCmd_Cfg_TE;
		out_be64(cregs + IOC_IOCmd_Cfg, val);
		(void)in_be64(cregs + IOC_IOCmd_Cfg);

		iounmap(xregs);
	}
}

static int __init cell_iommu_init_disabled(void)
{
	struct device_node *np = NULL;
	unsigned long base = 0, size;

	/* When no iommu is present, we use direct DMA ops */

	/* First make sure all IOC translation is turned off */
	cell_disable_iommus();

	/* If we have no Axon, we set up the spider DMA magic offset */
	if (of_find_node_by_name(NULL, "axon") == NULL)
		cell_dma_nommu_offset = SPIDER_DMA_OFFSET;

	/* Now we need to check to see where the memory is mapped
	 * in PCI space. We assume that all busses use the same dma
	 * window which is always the case so far on Cell, thus we
	 * pick up the first pci-internal node we can find and check
	 * the DMA window from there.
	 */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		if (cell_iommu_get_window(np, &base, &size) == 0)
			break;
	}
	if (np == NULL) {
		for_each_node_by_name(np, "pci-internal") {
			if (np->parent == NULL || np->parent->parent != NULL)
				continue;
			if (cell_iommu_get_window(np, &base, &size) == 0)
				break;
		}
	}
	of_node_put(np);

	/* If we found a DMA window, we check if it's big enough to enclose
	 * all of physical memory. If not, we force enable IOMMU
	 */
	if (np && size < memblock_end_of_DRAM()) {
		printk(KERN_WARNING "iommu: force-enabled, dma window"
		       " (%ldMB) smaller than total memory (%lldMB)\n",
		       size >> 20, memblock_end_of_DRAM() >> 20);
		return -ENODEV;
	}

	cell_dma_nommu_offset += base;

	if (cell_dma_nommu_offset != 0)
		cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;

	printk("iommu: disabled, direct DMA offset is 0x%lx\n",
	       cell_dma_nommu_offset);

	return 0;
}

/*
 * Fixed IOMMU mapping support
 *
 * This code adds support for setting up a fixed IOMMU mapping on certain
 * cell machines. For 64-bit devices this avoids the performance overhead of
 * mapping and unmapping pages at runtime. 32-bit devices are unable to use
 * the fixed mapping.
 *
 * The fixed mapping is established at boot, and maps all of physical memory
 * 1:1 into device space at some offset. On machines with < 30 GB of memory
 * we setup the fixed mapping immediately above the normal IOMMU window.
 *
 * For example a machine with 4GB of memory would end up with the normal
 * IOMMU window from 0-2GB and the fixed mapping window from 2GB to 6GB. In
 * this case a 64-bit device wishing to DMA to 1GB would be told to DMA to
 * 3GB, plus any offset required by firmware. The firmware offset is encoded
 * in the "dma-ranges" property.
 *
 * On machines with 30GB or more of memory, we are unable to place the fixed
 * mapping above the normal IOMMU window as we would run out of address space.
 * Instead we move the normal IOMMU window to coincide with the hash page
 * table, this region does not need to be part of the fixed mapping as no
 * device should ever be DMA'ing to it. We then setup the fixed mapping
 * from 0 to 32GB.
 */

static u64 cell_iommu_get_fixed_address(struct device *dev)
{
	u64 cpu_addr, size, best_size, dev_addr = OF_BAD_ADDR;
	struct device_node *np;
	const u32 *ranges = NULL;
	int i, len, best, naddr, nsize, pna, range_size;

	/* We can be called for platform devices that have no of_node */
	np = of_node_get(dev->of_node);
	if (!np)
		goto out;

	while (1) {
		naddr = of_n_addr_cells(np);
		nsize = of_n_size_cells(np);
		np = of_get_next_parent(np);
		if (!np)
			break;

		ranges = of_get_property(np, "dma-ranges", &len);

		/* Ignore empty ranges, they imply no translation required */
		if (ranges && len > 0)
			break;
	}

	if (!ranges) {
		dev_dbg(dev, "iommu: no dma-ranges found\n");
		goto out;
	}

	len /= sizeof(u32);

	pna = of_n_addr_cells(np);
	range_size = naddr + nsize + pna;

	/* dma-ranges format:
	 * child addr	: naddr cells
	 * parent addr	: pna cells
	 * size		: nsize cells
	 */
	for (i = 0, best = -1, best_size = 0; i < len; i += range_size) {
		cpu_addr = of_translate_dma_address(np, ranges + i + naddr);
		size = of_read_number(ranges + i + naddr + pna, nsize);

		if (cpu_addr == 0 && size > best_size) {
			best = i;
			best_size = size;
		}
	}

	if (best >= 0) {
		dev_addr = of_read_number(ranges + best, naddr);
	} else
		dev_dbg(dev, "iommu: no suitable range found!\n");

out:
	of_node_put(np);

	return dev_addr;
}

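/*
 * A device may bypass the dynamic window only if it is fully 64-bit DMA
 * capable and a usable fixed (1:1) range could be derived from its
 * dma-ranges; otherwise it keeps using the dynamic iommu_table assigned in
 * cell_dma_dev_setup().
 */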
static bool cell_pci_iommu_bypass_supported(struct pci_dev *pdev, u64 mask)
{
	return mask == DMA_BIT_MASK(64) &&
		cell_iommu_get_fixed_address(&pdev->dev) != OF_BAD_ADDR;
}

static void insert_16M_pte(unsigned long addr, unsigned long *ptab,
			   unsigned long base_pte)
{
	unsigned long segment, offset;

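	/*
	 * The fixed mapping uses 16MB IO pages in 256MB segments, so each
	 * segment holds 16 IOPTEs but still occupies a full 4K page table
	 * chunk (cf. cell_iommu_alloc_ptab()); index the right chunk and
	 * slot for this address.
	 */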
	segment = addr >> IO_SEGMENT_SHIFT;
	offset = (addr >> 24) - (segment << IO_PAGENO_BITS(24));
	ptab = ptab + (segment * (1 << 12) / sizeof(unsigned long));

	pr_debug("iommu: addr %lx ptab %p segment %lx offset %lx\n",
		 addr, ptab, segment, offset);

	ptab[offset] = base_pte | (__pa(addr) & CBE_IOPTE_RPN_Mask);
}

static void cell_iommu_setup_fixed_ptab(struct cbe_iommu *iommu,
	struct device_node *np, unsigned long dbase, unsigned long dsize,
	unsigned long fbase, unsigned long fsize)
{
	unsigned long base_pte, uaddr, ioaddr, *ptab;

	ptab = cell_iommu_alloc_ptab(iommu, fbase, fsize, dbase, dsize, 24);

	dma_iommu_fixed_base = fbase;

	pr_debug("iommu: mapping 0x%lx pages from 0x%lx\n", fsize, fbase);

	base_pte = CBE_IOPTE_PP_W | CBE_IOPTE_PP_R | CBE_IOPTE_M |
		(cell_iommu_get_ioid(np) & CBE_IOPTE_IOID_Mask);

	if (iommu_fixed_is_weak)
		pr_info("IOMMU: Using weak ordering for fixed mapping\n");
	else {
		pr_info("IOMMU: Using strong ordering for fixed mapping\n");
		base_pte |= CBE_IOPTE_SO_RW;
	}

	for (uaddr = 0; uaddr < fsize; uaddr += (1 << 24)) {
		/* Don't touch the dynamic region */
		ioaddr = uaddr + fbase;
		if (ioaddr >= dbase && ioaddr < (dbase + dsize)) {
			pr_debug("iommu: fixed/dynamic overlap, skipping\n");
			continue;
		}

		insert_16M_pte(uaddr, ptab, base_pte);
	}

	mb();
}

static int __init cell_iommu_fixed_mapping_init(void)
{
	unsigned long dbase, dsize, fbase, fsize, hbase, hend;
	struct cbe_iommu *iommu;
	struct device_node *np;

	/* The fixed mapping is only supported on axon machines */
	np = of_find_node_by_name(NULL, "axon");
	of_node_put(np);

	if (!np) {
		pr_debug("iommu: fixed mapping disabled, no axons found\n");
		return -1;
	}

	/* We must have dma-ranges properties for fixed mapping to work */
	np = of_find_node_with_property(NULL, "dma-ranges");
	of_node_put(np);

	if (!np) {
		pr_debug("iommu: no dma-ranges found, no fixed mapping\n");
		return -1;
	}

	/* The default setup is to have the fixed mapping sit after the
	 * dynamic region, so find the top of the largest IOMMU window
	 * on any axon, then add the size of RAM and that's our max value.
	 * If that is > 32GB we have to do other shenanigans.
	 */
	fbase = 0;
	for_each_node_by_name(np, "axon") {
		cell_iommu_get_window(np, &dbase, &dsize);
		fbase = max(fbase, dbase + dsize);
	}

	fbase = ALIGN(fbase, 1 << IO_SEGMENT_SHIFT);
	fsize = memblock_phys_mem_size();

	if ((fbase + fsize) <= 0x800000000ul)
		hbase = 0; /* use the device tree window */
	else {
		/* If we're over 32 GB we need to cheat. We can't map all of
		 * RAM with the fixed mapping, and also fit the dynamic
		 * region. So try to place the dynamic region where the hash
		 * table sits, drivers never need to DMA to it, we don't
		 * need a fixed mapping for that area.
		 */
		if (!htab_address) {
			pr_debug("iommu: htab is NULL, on LPAR? Huh?\n");
			return -1;
		}
		hbase = __pa(htab_address);
		hend = hbase + htab_size_bytes;

		/* The window must start and end on a segment boundary */
		if ((hbase != ALIGN(hbase, 1 << IO_SEGMENT_SHIFT)) ||
		    (hend != ALIGN(hend, 1 << IO_SEGMENT_SHIFT))) {
			pr_debug("iommu: hash window not segment aligned\n");
			return -1;
		}

		/* Check the hash window fits inside the real DMA window */
		for_each_node_by_name(np, "axon") {
			cell_iommu_get_window(np, &dbase, &dsize);

			if (hbase < dbase || (hend > (dbase + dsize))) {
				pr_debug("iommu: hash window doesn't fit in "
					 "real DMA window\n");
				of_node_put(np);
				return -1;
			}
		}

		fbase = 0;
	}

	/* Setup the dynamic regions */
	for_each_node_by_name(np, "axon") {
		iommu = cell_iommu_alloc(np);
		BUG_ON(!iommu);

		if (hbase == 0)
			cell_iommu_get_window(np, &dbase, &dsize);
		else {
			dbase = hbase;
			dsize = htab_size_bytes;
		}

		printk(KERN_DEBUG "iommu: node %d, dynamic window 0x%lx-0x%lx "
			"fixed window 0x%lx-0x%lx\n", iommu->nid, dbase,
			dbase + dsize, fbase, fbase + fsize);

		cell_iommu_setup_stab(iommu, dbase, dsize, fbase, fsize);
		iommu->ptab = cell_iommu_alloc_ptab(iommu, dbase, dsize, 0, 0,
						    IOMMU_PAGE_SHIFT_4K);
		cell_iommu_setup_fixed_ptab(iommu, np, dbase, dsize,
					    fbase, fsize);
		cell_iommu_enable_hardware(iommu);
		cell_iommu_setup_window(iommu, np, dbase, dsize, 0);
	}

	cell_pci_controller_ops.iommu_bypass_supported =
		cell_pci_iommu_bypass_supported;
	return 0;
}

static int iommu_fixed_disabled;

static int __init setup_iommu_fixed(char *str)
{
	struct device_node *pciep;

	if (strcmp(str, "off") == 0)
		iommu_fixed_disabled = 1;

	/* If we can find a pcie-endpoint in the device tree assume that
	 * we're on a triblade or a CAB so by default the fixed mapping
	 * should be set to be weakly ordered; but only if the boot
	 * option WASN'T set for strong ordering
	 */
	pciep = of_find_node_by_type(NULL, "pcie-endpoint");

	if (strcmp(str, "weak") == 0 || (pciep && strcmp(str, "strong") != 0))
		iommu_fixed_is_weak = true;

	of_node_put(pciep);

	return 1;
}
__setup("iommu_fixed=", setup_iommu_fixed);

static int __init cell_iommu_init(void)
{
	struct device_node *np;

	/* If IOMMU is disabled or we have little enough RAM to not need
	 * to enable it, we setup a direct mapping.
	 *
	 * Note: should we make sure we have the IOMMU actually disabled ?
	 */
	if (iommu_is_off ||
	    (!iommu_force_on && memblock_end_of_DRAM() <= 0x80000000ull))
		if (cell_iommu_init_disabled() == 0)
			goto bail;

	/* Setup various callbacks */
	cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;

	if (!iommu_fixed_disabled && cell_iommu_fixed_mapping_init() == 0)
		goto done;

	/* Create an iommu for each /axon node. */
	for_each_node_by_name(np, "axon") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, 0);
	}

	/* Create an iommu for each toplevel /pci-internal node for
	 * old hardware/firmware
	 */
	for_each_node_by_name(np, "pci-internal") {
		if (np->parent == NULL || np->parent->parent != NULL)
			continue;
		cell_iommu_init_one(np, SPIDER_DMA_OFFSET);
	}
 done:
	/* Setup default PCI iommu ops */
	set_pci_dma_ops(&dma_iommu_ops);
	cell_iommu_enabled = true;
 bail:
	/* Register callbacks on OF platform device addition/removal
	 * to handle linking them to the right DMA operations
	 */
	bus_register_notifier(&platform_bus_type, &cell_of_bus_notifier);

	return 0;
}
machine_arch_initcall(cell, cell_iommu_init);