// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup:
 *
 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
 *
 * Dynamic DMA mapping support, pSeries-specific parts, both SMP and LPAR.
 */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/crash_dump.h>
#include <linux/memory.h>
#include <linux/of.h>
#include <linux/iommu.h>
#include <linux/rculist.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
#include <asm/udbg.h>
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>

#include "pseries.h"

enum {
	DDW_QUERY_PE_DMA_WIN  = 0,
	DDW_CREATE_PE_DMA_WIN = 1,
	DDW_REMOVE_PE_DMA_WIN = 2,

	DDW_APPLICABLE_SIZE
};

enum {
	DDW_EXT_SIZE = 0,
	DDW_EXT_RESET_DMA_WIN = 1,
	DDW_EXT_QUERY_OUT_SIZE = 2
};
static struct iommu_table_group *iommu_pseries_alloc_group(int node)
{
	struct iommu_table_group *table_group;
	struct iommu_table *tbl;

	table_group = kzalloc_node(sizeof(struct iommu_table_group), GFP_KERNEL,
				   node);
	if (!table_group)
		return NULL;

	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, node);
	if (!tbl)
		goto free_group;

	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
	kref_init(&tbl->it_kref);

	table_group->tables[0] = tbl;

	return table_group;

free_group:
	kfree(table_group);
	return NULL;
}

static void iommu_pseries_free_group(struct iommu_table_group *table_group,
				     const char *node_name)
{
	struct iommu_table *tbl;

	if (!table_group)
		return;

	tbl = table_group->tables[0];
#ifdef CONFIG_IOMMU_API
	if (table_group->group) {
		iommu_group_put(table_group->group);
		BUG_ON(table_group->group);
	}
#endif
	iommu_tce_table_put(tbl);

	kfree(table_group);
}

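/*
 * Build TCEs for a non-LPAR pSeries TCE table that the kernel owns directly:
 * set the read (and, unless the mapping is DMA_TO_DEVICE, write) permission
 * bits and store the real page numbers for @npages pages straight into the
 * table at it_base, starting at @index.
 */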
static int tce_build_pSeries(struct iommu_table *tbl, long index,
			     long npages, unsigned long uaddr,
			     enum dma_data_direction direction,
			     unsigned long attrs)
{
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;

	proto_tce = TCE_PCI_READ; // Read allowed

	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--) {
		/* can't move this out since we might cross MEMBLOCK boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);

		uaddr += TCE_PAGE_SIZE;
		tcep++;
	}
	return 0;
}


static void tce_free_pSeries(struct iommu_table *tbl, long index, long npages)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	while (npages--)
		*(tcep++) = 0;
}

static unsigned long tce_get_pseries(struct iommu_table *tbl, long index)
{
	__be64 *tcep;

	tcep = ((__be64 *)tbl->it_base) + index;

	return be64_to_cpu(*tcep);
}

static void tce_free_pSeriesLP(unsigned long liobn, long, long);
static void tce_freemulti_pSeriesLP(struct iommu_table*, long, long);

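/*
 * LPAR variant: map @npages pages one TCE at a time via plpar_tce_put()
 * (the H_PUT_TCE hypercall). On H_NOT_ENOUGH_RESOURCES the entries created
 * so far are torn down again and the error is returned to the caller.
 */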
static int tce_build_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
			       long npages, unsigned long uaddr,
			       enum dma_data_direction direction,
			       unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce, tce;
	u64 rpn;
	int ret = 0;
	long tcenum_start = tcenum, npages_start = npages;

	rpn = __pa(uaddr) >> tceshift;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	while (npages--) {
		tce = proto_tce | (rpn & TCE_RPN_MASK) << tceshift;
		rc = plpar_tce_put((u64)liobn, (u64)tcenum << tceshift, tce);

		if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
			ret = (int)rc;
			tce_free_pSeriesLP(liobn, tcenum_start,
					   (npages_start - (npages + 1)));
			break;
		}

		if (rc && printk_ratelimit()) {
			printk("tce_build_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex = 0x%llx\n", (u64)liobn);
			printk("\ttcenum = 0x%llx\n", (u64)tcenum);
			printk("\ttce val = 0x%llx\n", tce);
			dump_stack();
		}

		tcenum++;
		rpn++;
	}
	return ret;
}

static DEFINE_PER_CPU(__be64 *, tce_page);

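/*
 * Multi-TCE LPAR variant: stage up to one page worth of TCEs in the per-cpu
 * scratch page (tce_page) and hand them to firmware in batches via
 * plpar_tce_put_indirect() (H_PUT_TCE_INDIRECT). Falls back to the
 * one-at-a-time tce_build_pSeriesLP() for single pages, when the firmware
 * feature is absent, or when the scratch page cannot be allocated.
 */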
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
				    long npages, unsigned long uaddr,
				    enum dma_data_direction direction,
				    unsigned long attrs)
{
	u64 rc = 0;
	u64 proto_tce;
	__be64 *tcep;
	u64 rpn;
	long l, limit;
	long tcenum_start = tcenum, npages_start = npages;
	int ret = 0;
	unsigned long flags;

	if ((npages == 1) || !firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
		return tce_build_pSeriesLP(tbl->it_index, tcenum,
					   tbl->it_page_shift, npages, uaddr,
					   direction, attrs);
	}

	local_irq_save(flags);	/* to protect tcep and the page behind it */

	tcep = __this_cpu_read(tce_page);

	/* This is safe to do since interrupts are off when we're called
	 * from iommu_alloc{,_sg}()
	 */
	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		/* If allocation fails, fall back to the loop implementation */
		if (!tcep) {
			local_irq_restore(flags);
			return tce_build_pSeriesLP(tbl->it_index, tcenum,
					tbl->it_page_shift,
					npages, uaddr, direction, attrs);
		}
		__this_cpu_write(tce_page, tcep);
	}

	rpn = __pa(uaddr) >> TCE_SHIFT;
	proto_tce = TCE_PCI_READ;
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, npages, 4096/TCE_ENTRY_SIZE);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
			rpn++;
		}

		rc = plpar_tce_put_indirect((u64)tbl->it_index,
					    (u64)tcenum << 12,
					    (u64)__pa(tcep),
					    limit);

		npages -= limit;
		tcenum += limit;
	} while (npages > 0 && !rc);

	local_irq_restore(flags);

	if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
		ret = (int)rc;
		tce_freemulti_pSeriesLP(tbl, tcenum_start,
					(npages_start - (npages + limit)));
		return ret;
	}

	if (rc && printk_ratelimit()) {
		printk("tce_buildmulti_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		printk("\ttce[0] val = 0x%llx\n", tcep[0]);
		dump_stack();
	}
	return ret;
}

static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long npages)
{
	u64 rc;

	while (npages--) {
		rc = plpar_tce_put((u64)liobn, (u64)tcenum << 12, 0);

		if (rc && printk_ratelimit()) {
			printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
			printk("\tindex = 0x%llx\n", (u64)liobn);
			printk("\ttcenum = 0x%llx\n", (u64)tcenum);
			dump_stack();
		}

		tcenum++;
	}
}


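/*
 * Clear @npages TCEs in one go via plpar_tce_stuff() (H_STUFF_TCE) when the
 * firmware supports it, otherwise fall back to clearing them one at a time.
 */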
static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
{
	u64 rc;

	if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
		return tce_free_pSeriesLP(tbl->it_index, tcenum, npages);

	rc = plpar_tce_stuff((u64)tbl->it_index, (u64)tcenum << 12, 0, npages);

	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
		printk("\trc = %lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\tnpages = 0x%llx\n", (u64)npages);
		dump_stack();
	}
}

static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
{
	u64 rc;
	unsigned long tce_ret;

	rc = plpar_tce_get((u64)tbl->it_index, (u64)tcenum << 12, &tce_ret);

	if (rc && printk_ratelimit()) {
		printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
		printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
		printk("\ttcenum = 0x%llx\n", (u64)tcenum);
		dump_stack();
	}

	return tce_ret;
}

/* this is compatible with cells for the device tree property */
struct dynamic_dma_window_prop {
	__be32	liobn;		/* tce table number */
	__be64	dma_base;	/* address hi,lo */
	__be32	tce_shift;	/* ilog2(tce_page_size) */
	__be32	window_shift;	/* ilog2(tce_window_size) */
};

struct direct_window {
	struct device_node *device;
	const struct dynamic_dma_window_prop *prop;
	struct list_head list;
};

/* Dynamic DMA Window support */
struct ddw_query_response {
	u32 windows_available;
	u64 largest_available_block;
	u32 page_size;
	u32 migration_capable;
};

struct ddw_create_response {
	u32 liobn;
	u32 addr_hi;
	u32 addr_lo;
};

static LIST_HEAD(direct_window_list);
/* prevents races between memory on/offline and window creation */
static DEFINE_SPINLOCK(direct_window_list_lock);
/* protects initializing window twice for same device */
static DEFINE_MUTEX(direct_window_init_mutex);
#define DIRECT64_PROPNAME "linux,direct64-ddr-window-info"

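/*
 * Clear the TCEs covering [start_pfn, start_pfn + num_pfn) of system memory
 * in the dynamic window described by @arg (a dynamic_dma_window_prop),
 * issuing H_STUFF_TCE in batches of up to 512 entries.
 */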
static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn,
		unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	int rc;
	u64 tce_size, num_tce, dma_offset, next;
	u32 tce_shift;
	long limit;

	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 512);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn),
				     dma_offset,
				     0, limit);
		next += limit * tce_size;
		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	return rc;
}

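/*
 * Map the pfn range [start_pfn, start_pfn + num_pfn) of system memory
 * linearly into the dynamic window described by @arg, offset by the window's
 * dma_base. Uses H_PUT_TCE_INDIRECT through the per-cpu scratch page when
 * available, otherwise maps the range with tce_build_pSeriesLP(). The walk
 * wrapper below adapts the signature for range-walk callbacks.
 */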
static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
		unsigned long num_pfn, const void *arg)
{
	const struct dynamic_dma_window_prop *maprange = arg;
	u64 tce_size, num_tce, dma_offset, next, proto_tce, liobn;
	__be64 *tcep;
	u32 tce_shift;
	u64 rc = 0;
	long l, limit;

	if (!firmware_has_feature(FW_FEATURE_PUT_TCE_IND)) {
		unsigned long tceshift = be32_to_cpu(maprange->tce_shift);
		unsigned long dmastart = (start_pfn << PAGE_SHIFT) +
				be64_to_cpu(maprange->dma_base);
		unsigned long tcenum = dmastart >> tceshift;
		unsigned long npages = num_pfn << PAGE_SHIFT >> tceshift;
		void *uaddr = __va(start_pfn << PAGE_SHIFT);

		return tce_build_pSeriesLP(be32_to_cpu(maprange->liobn),
				tcenum, tceshift, npages, (unsigned long) uaddr,
				DMA_BIDIRECTIONAL, 0);
	}

	local_irq_disable();	/* to protect tcep and the page behind it */
	tcep = __this_cpu_read(tce_page);

	if (!tcep) {
		tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
		if (!tcep) {
			local_irq_enable();
			return -ENOMEM;
		}
		__this_cpu_write(tce_page, tcep);
	}

	proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;

	liobn = (u64)be32_to_cpu(maprange->liobn);
	tce_shift = be32_to_cpu(maprange->tce_shift);
	tce_size = 1ULL << tce_shift;
	next = start_pfn << PAGE_SHIFT;
	num_tce = num_pfn << PAGE_SHIFT;

	/* round back to the beginning of the tce page size */
	num_tce += next & (tce_size - 1);
	next &= ~(tce_size - 1);

	/* convert to number of tces */
	num_tce |= tce_size - 1;
	num_tce >>= tce_shift;

	/* We can map max one pageful of TCEs at a time */
	do {
		/*
		 * Set up the page with TCE data, looping through and setting
		 * the values.
		 */
		limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE);
		dma_offset = next + be64_to_cpu(maprange->dma_base);

		for (l = 0; l < limit; l++) {
			tcep[l] = cpu_to_be64(proto_tce | next);
			next += tce_size;
		}

		rc = plpar_tce_put_indirect(liobn,
					    dma_offset,
					    (u64)__pa(tcep),
					    limit);

		num_tce -= limit;
	} while (num_tce > 0 && !rc);

	/* error cleanup: caller will clear whole range */

	local_irq_enable();
	return rc;
}

static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn,
		unsigned long num_pfn, void *arg)
{
	return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg);
}

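/*
 * Set up table parameters for a non-LPAR PHB whose TCE table address and
 * size come from the linux,tce-base / linux,tce-size properties. Carves the
 * next chunk of the PHB's 32-bit DMA space out of dma_window_base_cur and
 * panics if that would push past 2GB.
 */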
static void iommu_table_setparms(struct pci_controller *phb,
				 struct device_node *dn,
				 struct iommu_table *tbl)
{
	struct device_node *node;
	const unsigned long *basep;
	const u32 *sizep;

	node = phb->dn;

	basep = of_get_property(node, "linux,tce-base", NULL);
	sizep = of_get_property(node, "linux,tce-size", NULL);
	if (basep == NULL || sizep == NULL) {
		printk(KERN_ERR "PCI_DMA: iommu_table_setparms: %pOF has "
				"missing tce entries !\n", dn);
		return;
	}

	tbl->it_base = (unsigned long)__va(*basep);

	if (!is_kdump_kernel())
		memset((void *)tbl->it_base, 0, *sizep);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;

	/* Units of tce entries */
	tbl->it_offset = phb->dma_window_base_cur >> tbl->it_page_shift;

	/* Test if we are going over 2GB of DMA space */
	if (phb->dma_window_base_cur + phb->dma_window_size > 0x80000000ul) {
		udbg_printf("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
		panic("PCI_DMA: Unexpected number of IOAs under this PHB.\n");
	}

	phb->dma_window_base_cur += phb->dma_window_size;

	/* Set the tce table size - measured in entries */
	tbl->it_size = phb->dma_window_size >> tbl->it_page_shift;

	tbl->it_index = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
}

/*
 * iommu_table_setparms_lpar
 *
 * Function: On pSeries LPAR systems, return TCE table info, given a pci bus.
 */
static void iommu_table_setparms_lpar(struct pci_controller *phb,
				      struct device_node *dn,
				      struct iommu_table *tbl,
				      struct iommu_table_group *table_group,
				      const __be32 *dma_window)
{
	unsigned long offset, size;

	of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size);

	tbl->it_busno = phb->bus->number;
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_base = 0;
	tbl->it_blocksize = 16;
	tbl->it_type = TCE_PCI;
	tbl->it_offset = offset >> tbl->it_page_shift;
	tbl->it_size = size >> tbl->it_page_shift;

	table_group->tce32_start = offset;
	table_group->tce32_size = size;
}

struct iommu_table_ops iommu_table_pseries_ops = {
	.set = tce_build_pSeries,
	.clear = tce_free_pSeries,
	.get = tce_get_pseries
};

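/*
 * DMA window layout for a non-LPAR root bus (PHB): count the PHB's child
 * slots, reserve the first 128MB when an ISA/IDE bridge sits under the PHB,
 * and split the remaining 32-bit DMA space into per-slot, power-of-two
 * windows that iommu_table_setparms() consumes later.
 */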
static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
{
	struct device_node *dn;
	struct iommu_table *tbl;
	struct device_node *isa_dn, *isa_dn_orig;
	struct device_node *tmp;
	struct pci_dn *pci;
	int children;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeries: setting up bus %pOF\n", dn);

	if (bus->self) {
		/* This is not a root bus, any setup will be done for the
		 * device-side of the bridge in iommu_dev_setup_pSeries().
		 */
		return;
	}
	pci = PCI_DN(dn);

	/* Check if the ISA bus on the system is under
	 * this PHB.
	 */
	isa_dn = isa_dn_orig = of_find_node_by_type(NULL, "isa");

	while (isa_dn && isa_dn != dn)
		isa_dn = isa_dn->parent;

	of_node_put(isa_dn_orig);

	/* Count number of direct PCI children of the PHB. */
	for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
		children++;

	pr_debug("Children: %d\n", children);

	/* Calculate amount of DMA window per slot. Each window must be
	 * a power of two (due to pci_alloc_consistent requirements).
	 *
	 * Keep 256MB aside for PHBs with ISA.
	 */

	if (!isa_dn) {
		/* No ISA/IDE - just set window size and return */
		pci->phb->dma_window_size = 0x80000000ul; /* To be divided */

		while (pci->phb->dma_window_size * children > 0x80000000ul)
			pci->phb->dma_window_size >>= 1;
		pr_debug("No ISA/IDE, window size is 0x%llx\n",
			 pci->phb->dma_window_size);
		pci->phb->dma_window_base_cur = 0;

		return;
	}

	/* If we have ISA, then we probably have an IDE
	 * controller too. Allocate a 128MB table but
	 * skip the first 128MB to avoid stepping on ISA
	 * space.
	 */
	pci->phb->dma_window_size = 0x8000000ul;
	pci->phb->dma_window_base_cur = 0x8000000ul;

	pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
	tbl = pci->table_group->tables[0];

	iommu_table_setparms(pci->phb, dn, tbl);
	tbl->it_ops = &iommu_table_pseries_ops;
	iommu_init_table(tbl, pci->phb->node, 0, 0);

	/* Divide the rest (1.75GB) among the children */
	pci->phb->dma_window_size = 0x80000000ul;
	while (pci->phb->dma_window_size * children > 0x70000000ul)
		pci->phb->dma_window_size >>= 1;

	pr_debug("ISA/IDE, window size is 0x%llx\n", pci->phb->dma_window_size);
}

#ifdef CONFIG_IOMMU_API
static int tce_exchange_pseries(struct iommu_table *tbl, long index, unsigned
				long *tce, enum dma_data_direction *direction,
				bool realmode)
{
	long rc;
	unsigned long ioba = (unsigned long) index << tbl->it_page_shift;
	unsigned long flags, oldtce = 0;
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *tce | proto_tce;

	spin_lock_irqsave(&tbl->large_pool.lock, flags);

	rc = plpar_tce_get((u64)tbl->it_index, ioba, &oldtce);
	if (!rc)
		rc = plpar_tce_put((u64)tbl->it_index, ioba, newtce);

	if (!rc) {
		*direction = iommu_tce_direction(oldtce);
		*tce = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	}

	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);

	return rc;
}
#endif

struct iommu_table_ops iommu_table_lpar_multi_ops = {
	.set = tce_buildmulti_pSeriesLP,
#ifdef CONFIG_IOMMU_API
	.xchg_no_kill = tce_exchange_pseries,
#endif
	.clear = tce_freemulti_pSeriesLP,
	.get = tce_get_pSeriesLP
};

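/*
 * LPAR bus setup: walk up from the bus node to the nearest ancestor with an
 * ibm,dma-window property and, if that node does not yet have a table group,
 * create one, configure it from the property and register the IOMMU group.
 */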
static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus)
{
	struct iommu_table *tbl;
	struct device_node *dn, *pdn;
	struct pci_dn *ppci;
	const __be32 *dma_window = NULL;

	dn = pci_bus_to_OF_node(bus);

	pr_debug("pci_dma_bus_setup_pSeriesLP: setting up bus %pOF\n",
		 dn);

	/* Find nearest ibm,dma-window, walking up the device tree */
	for (pdn = dn; pdn != NULL; pdn = pdn->parent) {
		dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
		if (dma_window != NULL)
			break;
	}

	if (dma_window == NULL) {
		pr_debug(" no ibm,dma-window property !\n");
		return;
	}

	ppci = PCI_DN(pdn);

	pr_debug(" parent is %pOF, iommu_table: 0x%p\n",
		 pdn, ppci->table_group);

	if (!ppci->table_group) {
		ppci->table_group = iommu_pseries_alloc_group(ppci->phb->node);
		tbl = ppci->table_group->tables[0];
		iommu_table_setparms_lpar(ppci->phb, pdn, tbl,
				ppci->table_group, dma_window);
		tbl->it_ops = &iommu_table_lpar_multi_ops;
		iommu_init_table(tbl, ppci->phb->node, 0, 0);
		iommu_register_group(ppci->table_group,
				pci_domain_nr(bus), 0);
		pr_debug(" created table: %p\n", ppci->table_group);
	}
}


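/*
 * Per-device setup for the non-LPAR case: a device directly under the root
 * bus gets its own table; anything deeper reuses the first table found by
 * walking up the device tree.
 */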
static void pci_dma_dev_setup_pSeries(struct pci_dev *dev)
{
	struct device_node *dn;
	struct iommu_table *tbl;

	pr_debug("pci_dma_dev_setup_pSeries: %s\n", pci_name(dev));

	dn = dev->dev.of_node;

	/* If we're the direct child of a root bus, then we need to allocate
	 * an iommu table ourselves. The bus setup code should have setup
	 * the window sizes already.
	 */
	if (!dev->bus->self) {
		struct pci_controller *phb = PCI_DN(dn)->phb;

		pr_debug(" --> first child, no bridge. Allocating iommu table.\n");
		PCI_DN(dn)->table_group = iommu_pseries_alloc_group(phb->node);
		tbl = PCI_DN(dn)->table_group->tables[0];
		iommu_table_setparms(phb, dn, tbl);
		tbl->it_ops = &iommu_table_pseries_ops;
		iommu_init_table(tbl, phb->node, 0, 0);
		set_iommu_table_base(&dev->dev, tbl);
		return;
	}

	/* If this device is further down the bus tree, search upwards until
	 * an already allocated iommu table is found and use that.
	 */

	while (dn && PCI_DN(dn) && PCI_DN(dn)->table_group == NULL)
		dn = dn->parent;

	if (dn && PCI_DN(dn))
		set_iommu_table_base(&dev->dev,
				PCI_DN(dn)->table_group->tables[0]);
	else
		printk(KERN_WARNING "iommu: Device %s has no iommu table\n",
		       pci_name(dev));
}

static int __read_mostly disable_ddw;

static int __init disable_ddw_setup(char *str)
{
	disable_ddw = 1;
	printk(KERN_INFO "ppc iommu: disabling ddw.\n");

	return 0;
}

early_param("disable_ddw", disable_ddw_setup);

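/*
 * Tear down the DMA window described by the property @win: clear all of its
 * TCEs, then ask firmware to remove the window with the
 * ibm,remove-pe-dma-window RTAS call.
 */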
static void remove_dma_window(struct device_node *np, u32 *ddw_avail,
			      struct property *win)
{
	struct dynamic_dma_window_prop *dwp;
	u64 liobn;
	int ret;

	dwp = win->value;
	liobn = (u64)be32_to_cpu(dwp->liobn);

	/* clear the whole window, note the arg is in kernel pages */
	ret = tce_clearrange_multi_pSeriesLP(0,
		1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp);
	if (ret)
		pr_warn("%pOF failed to clear tces in window.\n",
			np);
	else
		pr_debug("%pOF successfully cleared tces in window.\n",
			 np);

	ret = rtas_call(ddw_avail[DDW_REMOVE_PE_DMA_WIN], 1, 1, NULL, liobn);
	if (ret)
		pr_warn("%pOF: failed to remove direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np, ret, ddw_avail[DDW_REMOVE_PE_DMA_WIN], liobn);
	else
		pr_debug("%pOF: successfully removed direct window: rtas returned "
			"%d to ibm,remove-pe-dma-window(%x) %llx\n",
			np, ret, ddw_avail[DDW_REMOVE_PE_DMA_WIN], liobn);
}

static void remove_ddw(struct device_node *np, bool remove_prop)
{
	struct property *win;
	u32 ddw_avail[DDW_APPLICABLE_SIZE];
	int ret = 0;

	ret = of_property_read_u32_array(np, "ibm,ddw-applicable",
					 &ddw_avail[0], DDW_APPLICABLE_SIZE);
	if (ret)
		return;

	win = of_find_property(np, DIRECT64_PROPNAME, NULL);
	if (!win)
		return;

	if (win->length >= sizeof(struct dynamic_dma_window_prop))
		remove_dma_window(np, ddw_avail, win);

	if (!remove_prop)
		return;

	ret = of_remove_property(np, win);
	if (ret)
		pr_warn("%pOF: failed to remove direct window property: %d\n",
			np, ret);
}

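/*
 * Return the DMA base of a direct window previously created for @pdn, or 0
 * if none is recorded on direct_window_list.
 */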
static u64 find_existing_ddw(struct device_node *pdn)
{
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;
	u64 dma_addr = 0;

	spin_lock(&direct_window_list_lock);
	/* check if we already created a window and dupe that config if so */
	list_for_each_entry(window, &direct_window_list, list) {
		if (window->device == pdn) {
			direct64 = window->prop;
			dma_addr = be64_to_cpu(direct64->dma_base);
			break;
		}
	}
	spin_unlock(&direct_window_list_lock);

	return dma_addr;
}

static int find_existing_ddw_windows(void)
{
	int len;
	struct device_node *pdn;
	struct direct_window *window;
	const struct dynamic_dma_window_prop *direct64;

	if (!firmware_has_feature(FW_FEATURE_LPAR))
		return 0;

	for_each_node_with_property(pdn, DIRECT64_PROPNAME) {
		direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len);
		if (!direct64)
			continue;

		window = kzalloc(sizeof(*window), GFP_KERNEL);
		if (!window || len < sizeof(struct dynamic_dma_window_prop)) {
			kfree(window);
			remove_ddw(pdn, true);
			continue;
		}

		window->device = pdn;
		window->prop = direct64;
		spin_lock(&direct_window_list_lock);
		list_add(&window->list, &direct_window_list);
		spin_unlock(&direct_window_list_lock);
	}

	return 0;
}
machine_arch_initcall(pseries, find_existing_ddw_windows);

/**
 * ddw_read_ext - Get the value of a DDW extension
 * @np: device node from which the extension value is to be read.
 * @extnum: index number of the extension.
 * @value: pointer to return value, modified when extension is available.
 *
 * Checks if "ibm,ddw-extensions" exists for this node, and gets the value
 * at index 'extnum'.
 * It can be used only to check if a property exists, passing value == NULL.
 *
 * Returns:
 *	0 if extension successfully read
 *	-EINVAL if the "ibm,ddw-extensions" does not exist,
 *	-ENODATA if "ibm,ddw-extensions" does not have a value, and
 *	-EOVERFLOW if "ibm,ddw-extensions" does not contain this extension.
 */
static inline int ddw_read_ext(const struct device_node *np, int extnum,
			       u32 *value)
{
	static const char propname[] = "ibm,ddw-extensions";
	u32 count;
	int ret;

	ret = of_property_read_u32_index(np, propname, DDW_EXT_SIZE, &count);
	if (ret)
		return ret;

	if (count < extnum)
		return -EOVERFLOW;

	if (!value)
		value = &count;

	return of_property_read_u32_index(np, propname, extnum, value);
}

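/*
 * Call ibm,query-pe-dma-windows for the PE that @dev belongs to and unpack
 * the result into @query. The RTAS call returns either 5 or 6 output values
 * depending on the DDW_EXT_QUERY_OUT_SIZE extension; with 6 outputs the
 * largest available block is a 64-bit quantity split across two cells.
 */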
static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		     struct ddw_query_response *query,
		     struct device_node *parent)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	u32 cfg_addr, ext_query, query_out[5];
	u64 buid;
	int ret, out_sz;

	/*
	 * From LoPAR level 2.8, "ibm,ddw-extensions" index 3 can rule how many
	 * output parameters ibm,query-pe-dma-windows will have, ranging from
	 * 5 to 6.
	 */
	ret = ddw_read_ext(parent, DDW_EXT_QUERY_OUT_SIZE, &ext_query);
	if (!ret && ext_query == 1)
		out_sz = 6;
	else
		out_sz = 5;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

	ret = rtas_call(ddw_avail[DDW_QUERY_PE_DMA_WIN], 3, out_sz, query_out,
			cfg_addr, BUID_HI(buid), BUID_LO(buid));
	dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x returned %d\n",
		 ddw_avail[DDW_QUERY_PE_DMA_WIN], cfg_addr, BUID_HI(buid),
		 BUID_LO(buid), ret);

	switch (out_sz) {
	case 5:
		query->windows_available = query_out[0];
		query->largest_available_block = query_out[1];
		query->page_size = query_out[2];
		query->migration_capable = query_out[3];
		break;
	case 6:
		query->windows_available = query_out[0];
		query->largest_available_block = ((u64)query_out[1] << 32) |
						 query_out[2];
		query->page_size = query_out[3];
		query->migration_capable = query_out[4];
		break;
	}

	return ret;
}

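/*
 * Call ibm,create-pe-dma-window to create a new DMA window of
 * 2^window_shift bytes backed by 2^page_shift pages, retrying while RTAS
 * reports a busy/extended-delay status. The LIOBN and starting address of
 * the new window come back in @create.
 */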
static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
		      struct ddw_create_response *create, int page_shift,
		      int window_shift)
{
	struct device_node *dn;
	struct pci_dn *pdn;
	u32 cfg_addr;
	u64 buid;
	int ret;

	/*
	 * Get the config address and phb buid of the PE window.
	 * Rely on eeh to retrieve this for us.
	 * Retrieve them from the pci device, not the node with the
	 * dma-window property
	 */
	dn = pci_device_to_OF_node(dev);
	pdn = PCI_DN(dn);
	buid = pdn->phb->buid;
	cfg_addr = ((pdn->busno << 16) | (pdn->devfn << 8));

	do {
		/* extra outputs are LIOBN and dma-addr (hi, lo) */
		ret = rtas_call(ddw_avail[DDW_CREATE_PE_DMA_WIN], 5, 4,
				(u32 *)create, cfg_addr, BUID_HI(buid),
				BUID_LO(buid), page_shift, window_shift);
	} while (rtas_busy_delay(ret));
	dev_info(&dev->dev,
		"ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d "
		"(liobn = 0x%x starting addr = %x %x)\n",
		 ddw_avail[DDW_CREATE_PE_DMA_WIN], cfg_addr, BUID_HI(buid),
		 BUID_LO(buid), page_shift, window_shift, ret, create->liobn,
		 create->addr_hi, create->addr_lo);

	return ret;
}

struct failed_ddw_pdn {
	struct device_node *pdn;
	struct list_head list;
};

static LIST_HEAD(failed_ddw_pdn_list);

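/*
 * Return the highest address a DDW window may need to cover: the maximum of
 * memory_hotplug_max() and the end of every "memory" node in the device
 * tree.
 */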
ddw_memory_hotplug_max(void)1032*4882a593Smuzhiyun static phys_addr_t ddw_memory_hotplug_max(void)
1033*4882a593Smuzhiyun {
1034*4882a593Smuzhiyun phys_addr_t max_addr = memory_hotplug_max();
1035*4882a593Smuzhiyun struct device_node *memory;
1036*4882a593Smuzhiyun
1037*4882a593Smuzhiyun for_each_node_by_type(memory, "memory") {
1038*4882a593Smuzhiyun unsigned long start, size;
1039*4882a593Smuzhiyun int n_mem_addr_cells, n_mem_size_cells, len;
1040*4882a593Smuzhiyun const __be32 *memcell_buf;
1041*4882a593Smuzhiyun
1042*4882a593Smuzhiyun memcell_buf = of_get_property(memory, "reg", &len);
1043*4882a593Smuzhiyun if (!memcell_buf || len <= 0)
1044*4882a593Smuzhiyun continue;
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun n_mem_addr_cells = of_n_addr_cells(memory);
1047*4882a593Smuzhiyun n_mem_size_cells = of_n_size_cells(memory);
1048*4882a593Smuzhiyun
1049*4882a593Smuzhiyun start = of_read_number(memcell_buf, n_mem_addr_cells);
1050*4882a593Smuzhiyun memcell_buf += n_mem_addr_cells;
1051*4882a593Smuzhiyun size = of_read_number(memcell_buf, n_mem_size_cells);
1052*4882a593Smuzhiyun memcell_buf += n_mem_size_cells;
1053*4882a593Smuzhiyun
1054*4882a593Smuzhiyun max_addr = max_t(phys_addr_t, max_addr, start + size);
1055*4882a593Smuzhiyun }
1056*4882a593Smuzhiyun
1057*4882a593Smuzhiyun return max_addr;
1058*4882a593Smuzhiyun }
1059*4882a593Smuzhiyun
1060*4882a593Smuzhiyun /*
1061*4882a593Smuzhiyun * Platforms supporting the DDW option starting with LoPAR level 2.7 implement
1062*4882a593Smuzhiyun * ibm,ddw-extensions, which carries the rtas token for
1063*4882a593Smuzhiyun * ibm,reset-pe-dma-windows.
1064*4882a593Smuzhiyun * That rtas-call can be used to restore the default DMA window for the device.
1065*4882a593Smuzhiyun */
reset_dma_window(struct pci_dev * dev,struct device_node * par_dn)1066*4882a593Smuzhiyun static void reset_dma_window(struct pci_dev *dev, struct device_node *par_dn)
1067*4882a593Smuzhiyun {
1068*4882a593Smuzhiyun int ret;
1069*4882a593Smuzhiyun u32 cfg_addr, reset_dma_win;
1070*4882a593Smuzhiyun u64 buid;
1071*4882a593Smuzhiyun struct device_node *dn;
1072*4882a593Smuzhiyun struct pci_dn *pdn;
1073*4882a593Smuzhiyun
1074*4882a593Smuzhiyun ret = ddw_read_ext(par_dn, DDW_EXT_RESET_DMA_WIN, &reset_dma_win);
1075*4882a593Smuzhiyun if (ret)
1076*4882a593Smuzhiyun return;
1077*4882a593Smuzhiyun
1078*4882a593Smuzhiyun dn = pci_device_to_OF_node(dev);
1079*4882a593Smuzhiyun pdn = PCI_DN(dn);
1080*4882a593Smuzhiyun buid = pdn->phb->buid;
1081*4882a593Smuzhiyun cfg_addr = (pdn->busno << 16) | (pdn->devfn << 8);
1082*4882a593Smuzhiyun
1083*4882a593Smuzhiyun ret = rtas_call(reset_dma_win, 3, 1, NULL, cfg_addr, BUID_HI(buid),
1084*4882a593Smuzhiyun BUID_LO(buid));
1085*4882a593Smuzhiyun if (ret)
1086*4882a593Smuzhiyun dev_info(&dev->dev,
1087*4882a593Smuzhiyun "ibm,reset-pe-dma-windows(%x) %x %x %x returned %d ",
1088*4882a593Smuzhiyun reset_dma_win, cfg_addr, BUID_HI(buid), BUID_LO(buid),
1089*4882a593Smuzhiyun ret);
1090*4882a593Smuzhiyun }
1091*4882a593Smuzhiyun
1092*4882a593Smuzhiyun /*
1093*4882a593Smuzhiyun * If the PE supports dynamic dma windows, and there is space for a table
1094*4882a593Smuzhiyun * that can map all pages in a linear offset, then setup such a table,
1095*4882a593Smuzhiyun * and record the dma-offset in the struct device.
1096*4882a593Smuzhiyun *
1097*4882a593Smuzhiyun * dev: the pci device we are checking
1098*4882a593Smuzhiyun * pdn: the parent pe node with the ibm,dma_window property
1099*4882a593Smuzhiyun * Future: also check if we can remap the base window for our base page size
1100*4882a593Smuzhiyun *
1101*4882a593Smuzhiyun * returns the dma offset for use by the direct mapped DMA code.
1102*4882a593Smuzhiyun */
enable_ddw(struct pci_dev * dev,struct device_node * pdn)1103*4882a593Smuzhiyun static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
1104*4882a593Smuzhiyun {
1105*4882a593Smuzhiyun int len, ret;
1106*4882a593Smuzhiyun struct ddw_query_response query;
1107*4882a593Smuzhiyun struct ddw_create_response create;
1108*4882a593Smuzhiyun int page_shift;
1109*4882a593Smuzhiyun u64 dma_addr, max_addr;
1110*4882a593Smuzhiyun struct device_node *dn;
1111*4882a593Smuzhiyun u32 ddw_avail[DDW_APPLICABLE_SIZE];
1112*4882a593Smuzhiyun struct direct_window *window;
1113*4882a593Smuzhiyun struct property *win64;
1114*4882a593Smuzhiyun struct dynamic_dma_window_prop *ddwprop;
1115*4882a593Smuzhiyun struct failed_ddw_pdn *fpdn;
1116*4882a593Smuzhiyun bool default_win_removed = false;
1117*4882a593Smuzhiyun
1118*4882a593Smuzhiyun mutex_lock(&direct_window_init_mutex);
1119*4882a593Smuzhiyun
1120*4882a593Smuzhiyun dma_addr = find_existing_ddw(pdn);
1121*4882a593Smuzhiyun if (dma_addr != 0)
1122*4882a593Smuzhiyun goto out_unlock;
1123*4882a593Smuzhiyun
1124*4882a593Smuzhiyun /*
1125*4882a593Smuzhiyun * If we already went through this for a previous function of
1126*4882a593Smuzhiyun * the same device and failed, we don't want to muck with the
1127*4882a593Smuzhiyun * DMA window again, as it will race with in-flight operations
1128*4882a593Smuzhiyun * and can lead to EEHs. The above mutex protects access to the
1129*4882a593Smuzhiyun * list.
1130*4882a593Smuzhiyun */
1131*4882a593Smuzhiyun list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
1132*4882a593Smuzhiyun if (fpdn->pdn == pdn)
1133*4882a593Smuzhiyun goto out_unlock;
1134*4882a593Smuzhiyun }
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun /*
1137*4882a593Smuzhiyun * the ibm,ddw-applicable property holds the tokens for:
1138*4882a593Smuzhiyun * ibm,query-pe-dma-window
1139*4882a593Smuzhiyun * ibm,create-pe-dma-window
1140*4882a593Smuzhiyun * ibm,remove-pe-dma-window
1141*4882a593Smuzhiyun * for the given node in that order.
1142*4882a593Smuzhiyun * the property is actually in the parent, not the PE
1143*4882a593Smuzhiyun */
1144*4882a593Smuzhiyun ret = of_property_read_u32_array(pdn, "ibm,ddw-applicable",
1145*4882a593Smuzhiyun &ddw_avail[0], DDW_APPLICABLE_SIZE);
1146*4882a593Smuzhiyun if (ret)
1147*4882a593Smuzhiyun goto out_failed;
1148*4882a593Smuzhiyun
1149*4882a593Smuzhiyun /*
1150*4882a593Smuzhiyun * Query if there is a second window of size to map the
1151*4882a593Smuzhiyun * whole partition. Query returns number of windows, largest
1152*4882a593Smuzhiyun * block assigned to PE (partition endpoint), and two bitmasks
1153*4882a593Smuzhiyun * of page sizes: supported and supported for migrate-dma.
1154*4882a593Smuzhiyun */
1155*4882a593Smuzhiyun dn = pci_device_to_OF_node(dev);
1156*4882a593Smuzhiyun ret = query_ddw(dev, ddw_avail, &query, pdn);
1157*4882a593Smuzhiyun if (ret != 0)
1158*4882a593Smuzhiyun goto out_failed;
1159*4882a593Smuzhiyun
1160*4882a593Smuzhiyun /*
1161*4882a593Smuzhiyun * If there is no window available, remove the default DMA window,
1162*4882a593Smuzhiyun * if it's present. This will make all the resources available to the
1163*4882a593Smuzhiyun * new DDW window.
1164*4882a593Smuzhiyun * If anything fails after this point, the default window must be restored,
1165*4882a593Smuzhiyun * so also check that the reset-window extension is present.
1166*4882a593Smuzhiyun */
1167*4882a593Smuzhiyun if (query.windows_available == 0) {
1168*4882a593Smuzhiyun struct property *default_win;
1169*4882a593Smuzhiyun int reset_win_ext;
1170*4882a593Smuzhiyun
1171*4882a593Smuzhiyun default_win = of_find_property(pdn, "ibm,dma-window", NULL);
1172*4882a593Smuzhiyun if (!default_win)
1173*4882a593Smuzhiyun goto out_failed;
1174*4882a593Smuzhiyun
1175*4882a593Smuzhiyun reset_win_ext = ddw_read_ext(pdn, DDW_EXT_RESET_DMA_WIN, NULL);
1176*4882a593Smuzhiyun if (reset_win_ext)
1177*4882a593Smuzhiyun goto out_failed;
1178*4882a593Smuzhiyun
1179*4882a593Smuzhiyun remove_dma_window(pdn, ddw_avail, default_win);
1180*4882a593Smuzhiyun default_win_removed = true;
1181*4882a593Smuzhiyun
1182*4882a593Smuzhiyun /* Query again, to check if the window is available */
1183*4882a593Smuzhiyun ret = query_ddw(dev, ddw_avail, &query, pdn);
1184*4882a593Smuzhiyun if (ret != 0)
1185*4882a593Smuzhiyun goto out_failed;
1186*4882a593Smuzhiyun
1187*4882a593Smuzhiyun if (query.windows_available == 0) {
1188*4882a593Smuzhiyun /* no windows are available for this device. */
1189*4882a593Smuzhiyun dev_dbg(&dev->dev, "no free dynamic windows");
1190*4882a593Smuzhiyun goto out_failed;
1191*4882a593Smuzhiyun }
1192*4882a593Smuzhiyun }
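	/*
	 * Pick the largest TCE page size the query reports as supported.
	 * Per the checks below, bit 0 = 4kB, bit 1 = 64kB, bit 2 = 16MB;
	 * larger pages need fewer TCEs to cover the whole partition.
	 */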
1193*4882a593Smuzhiyun if (query.page_size & 4) {
1194*4882a593Smuzhiyun page_shift = 24; /* 16MB */
1195*4882a593Smuzhiyun } else if (query.page_size & 2) {
1196*4882a593Smuzhiyun page_shift = 16; /* 64kB */
1197*4882a593Smuzhiyun } else if (query.page_size & 1) {
1198*4882a593Smuzhiyun page_shift = 12; /* 4kB */
1199*4882a593Smuzhiyun } else {
1200*4882a593Smuzhiyun dev_dbg(&dev->dev, "no supported direct page size in mask %x",
1201*4882a593Smuzhiyun query.page_size);
1202*4882a593Smuzhiyun goto out_failed;
1203*4882a593Smuzhiyun }
1204*4882a593Smuzhiyun /* verify the window can map the whole partition: */
1205*4882a593Smuzhiyun /* largest available block * page size must cover the max memory hotplug addr */
1206*4882a593Smuzhiyun max_addr = ddw_memory_hotplug_max();
1207*4882a593Smuzhiyun if (query.largest_available_block < (max_addr >> page_shift)) {
1208*4882a593Smuzhiyun dev_dbg(&dev->dev, "can't map partition max 0x%llx with %llu "
1209*4882a593Smuzhiyun "%llu-sized pages\n", max_addr, query.largest_available_block,
1210*4882a593Smuzhiyun 1ULL << page_shift);
1211*4882a593Smuzhiyun goto out_failed;
1212*4882a593Smuzhiyun }
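	/*
	 * Size the new window, in address bits, so it spans up to the highest
	 * possible memory address; it is then described by a DIRECT64 property.
	 */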
1213*4882a593Smuzhiyun len = order_base_2(max_addr);
1214*4882a593Smuzhiyun win64 = kzalloc(sizeof(struct property), GFP_KERNEL);
1215*4882a593Smuzhiyun if (!win64) {
1216*4882a593Smuzhiyun dev_info(&dev->dev,
1217*4882a593Smuzhiyun "couldn't allocate property for 64bit dma window\n");
1218*4882a593Smuzhiyun goto out_failed;
1219*4882a593Smuzhiyun }
1220*4882a593Smuzhiyun win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL);
1221*4882a593Smuzhiyun win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL);
1222*4882a593Smuzhiyun win64->length = sizeof(*ddwprop);
1223*4882a593Smuzhiyun if (!win64->name || !win64->value) {
1224*4882a593Smuzhiyun dev_info(&dev->dev,
1225*4882a593Smuzhiyun "couldn't allocate property name and value\n");
1226*4882a593Smuzhiyun goto out_free_prop;
1227*4882a593Smuzhiyun }
1228*4882a593Smuzhiyun
1229*4882a593Smuzhiyun ret = create_ddw(dev, ddw_avail, &create, page_shift, len);
1230*4882a593Smuzhiyun if (ret != 0)
1231*4882a593Smuzhiyun goto out_free_prop;
1232*4882a593Smuzhiyun
1233*4882a593Smuzhiyun ddwprop->liobn = cpu_to_be32(create.liobn);
1234*4882a593Smuzhiyun ddwprop->dma_base = cpu_to_be64(((u64)create.addr_hi << 32) |
1235*4882a593Smuzhiyun create.addr_lo);
1236*4882a593Smuzhiyun ddwprop->tce_shift = cpu_to_be32(page_shift);
1237*4882a593Smuzhiyun ddwprop->window_shift = cpu_to_be32(len);
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %pOF\n",
1240*4882a593Smuzhiyun create.liobn, dn);
1241*4882a593Smuzhiyun
1242*4882a593Smuzhiyun window = kzalloc(sizeof(*window), GFP_KERNEL);
1243*4882a593Smuzhiyun if (!window)
1244*4882a593Smuzhiyun goto out_clear_window;
1245*4882a593Smuzhiyun
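	/*
	 * Populate the new window up front: walk every block of system RAM
	 * and set up TCEs so the device can DMA directly to any RAM address.
	 */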
1246*4882a593Smuzhiyun ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
1247*4882a593Smuzhiyun win64->value, tce_setrange_multi_pSeriesLP_walk);
1248*4882a593Smuzhiyun if (ret) {
1249*4882a593Smuzhiyun dev_info(&dev->dev, "failed to map direct window for %pOF: %d\n",
1250*4882a593Smuzhiyun dn, ret);
1251*4882a593Smuzhiyun goto out_free_window;
1252*4882a593Smuzhiyun }
1253*4882a593Smuzhiyun
1254*4882a593Smuzhiyun ret = of_add_property(pdn, win64);
1255*4882a593Smuzhiyun if (ret) {
1256*4882a593Smuzhiyun dev_err(&dev->dev, "unable to add dma window property for %pOF: %d",
1257*4882a593Smuzhiyun pdn, ret);
1258*4882a593Smuzhiyun goto out_free_window;
1259*4882a593Smuzhiyun }
1260*4882a593Smuzhiyun
1261*4882a593Smuzhiyun window->device = pdn;
1262*4882a593Smuzhiyun window->prop = ddwprop;
1263*4882a593Smuzhiyun spin_lock(&direct_window_list_lock);
1264*4882a593Smuzhiyun list_add(&window->list, &direct_window_list);
1265*4882a593Smuzhiyun spin_unlock(&direct_window_list_lock);
1266*4882a593Smuzhiyun
1267*4882a593Smuzhiyun dma_addr = be64_to_cpu(ddwprop->dma_base);
1268*4882a593Smuzhiyun goto out_unlock;
1269*4882a593Smuzhiyun
1270*4882a593Smuzhiyun out_free_window:
1271*4882a593Smuzhiyun kfree(window);
1272*4882a593Smuzhiyun
1273*4882a593Smuzhiyun out_clear_window:
1274*4882a593Smuzhiyun remove_ddw(pdn, true);
1275*4882a593Smuzhiyun
1276*4882a593Smuzhiyun out_free_prop:
1277*4882a593Smuzhiyun kfree(win64->name);
1278*4882a593Smuzhiyun kfree(win64->value);
1279*4882a593Smuzhiyun kfree(win64);
1280*4882a593Smuzhiyun
1281*4882a593Smuzhiyun out_failed:
1282*4882a593Smuzhiyun if (default_win_removed)
1283*4882a593Smuzhiyun reset_dma_window(dev, pdn);
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
1286*4882a593Smuzhiyun if (!fpdn)
1287*4882a593Smuzhiyun goto out_unlock;
1288*4882a593Smuzhiyun fpdn->pdn = pdn;
1289*4882a593Smuzhiyun list_add(&fpdn->list, &failed_ddw_pdn_list);
1290*4882a593Smuzhiyun
1291*4882a593Smuzhiyun out_unlock:
1292*4882a593Smuzhiyun mutex_unlock(&direct_window_init_mutex);
1293*4882a593Smuzhiyun return dma_addr;
1294*4882a593Smuzhiyun }
1295*4882a593Smuzhiyun
1296*4882a593Smuzhiyun static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev)
1297*4882a593Smuzhiyun {
1298*4882a593Smuzhiyun struct device_node *pdn, *dn;
1299*4882a593Smuzhiyun struct iommu_table *tbl;
1300*4882a593Smuzhiyun const __be32 *dma_window = NULL;
1301*4882a593Smuzhiyun struct pci_dn *pci;
1302*4882a593Smuzhiyun
1303*4882a593Smuzhiyun pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev));
1304*4882a593Smuzhiyun
1305*4882a593Smuzhiyun /* dev setup for LPAR is a little tricky, since the device tree might
1306*4882a593Smuzhiyun * contain the dma-window properties per-device and not necessarily
1307*4882a593Smuzhiyun * for the bus. So we need to search upwards in the tree until we
1308*4882a593Smuzhiyun * either hit a dma-window property, OR find a parent with a table
1309*4882a593Smuzhiyun * already allocated.
1310*4882a593Smuzhiyun */
1311*4882a593Smuzhiyun dn = pci_device_to_OF_node(dev);
1312*4882a593Smuzhiyun pr_debug(" node is %pOF\n", dn);
1313*4882a593Smuzhiyun
1314*4882a593Smuzhiyun for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
1315*4882a593Smuzhiyun pdn = pdn->parent) {
1316*4882a593Smuzhiyun dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
1317*4882a593Smuzhiyun if (dma_window)
1318*4882a593Smuzhiyun break;
1319*4882a593Smuzhiyun }
1320*4882a593Smuzhiyun
1321*4882a593Smuzhiyun if (!pdn || !PCI_DN(pdn)) {
1322*4882a593Smuzhiyun printk(KERN_WARNING "pci_dma_dev_setup_pSeriesLP: "
1323*4882a593Smuzhiyun "no DMA window found for pci dev=%s dn=%pOF\n",
1324*4882a593Smuzhiyun pci_name(dev), dn);
1325*4882a593Smuzhiyun return;
1326*4882a593Smuzhiyun }
1327*4882a593Smuzhiyun pr_debug(" parent is %pOF\n", pdn);
1328*4882a593Smuzhiyun
1329*4882a593Smuzhiyun pci = PCI_DN(pdn);
1330*4882a593Smuzhiyun if (!pci->table_group) {
1331*4882a593Smuzhiyun pci->table_group = iommu_pseries_alloc_group(pci->phb->node);
1332*4882a593Smuzhiyun tbl = pci->table_group->tables[0];
1333*4882a593Smuzhiyun iommu_table_setparms_lpar(pci->phb, pdn, tbl,
1334*4882a593Smuzhiyun pci->table_group, dma_window);
1335*4882a593Smuzhiyun tbl->it_ops = &iommu_table_lpar_multi_ops;
1336*4882a593Smuzhiyun iommu_init_table(tbl, pci->phb->node, 0, 0);
1337*4882a593Smuzhiyun iommu_register_group(pci->table_group,
1338*4882a593Smuzhiyun pci_domain_nr(pci->phb->bus), 0);
1339*4882a593Smuzhiyun pr_debug(" created table: %p\n", pci->table_group);
1340*4882a593Smuzhiyun } else {
1341*4882a593Smuzhiyun pr_debug(" found DMA window, table: %p\n", pci->table_group);
1342*4882a593Smuzhiyun }
1343*4882a593Smuzhiyun
1344*4882a593Smuzhiyun set_iommu_table_base(&dev->dev, pci->table_group->tables[0]);
1345*4882a593Smuzhiyun iommu_add_device(pci->table_group, &dev->dev);
1346*4882a593Smuzhiyun }
1347*4882a593Smuzhiyun
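/*
 * Decide whether this device can bypass the 32-bit TCE table: if the PE
 * supports a dynamic DMA window large enough to direct-map all memory,
 * enable it and record the resulting offset for the direct-mapped DMA path.
 */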
1348*4882a593Smuzhiyun static bool iommu_bypass_supported_pSeriesLP(struct pci_dev *pdev, u64 dma_mask)
1349*4882a593Smuzhiyun {
1350*4882a593Smuzhiyun struct device_node *dn = pci_device_to_OF_node(pdev), *pdn;
1351*4882a593Smuzhiyun const __be32 *dma_window = NULL;
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun /* only attempt to use a new window if 64-bit DMA is requested */
1354*4882a593Smuzhiyun if (dma_mask < DMA_BIT_MASK(64))
1355*4882a593Smuzhiyun return false;
1356*4882a593Smuzhiyun
1357*4882a593Smuzhiyun dev_dbg(&pdev->dev, "node is %pOF\n", dn);
1358*4882a593Smuzhiyun
1359*4882a593Smuzhiyun /*
1360*4882a593Smuzhiyun * the device tree might contain the dma-window properties
1361*4882a593Smuzhiyun * per-device and not necessarily for the bus. So we need to
1362*4882a593Smuzhiyun * search upwards in the tree until we either hit a dma-window
1363*4882a593Smuzhiyun * property, OR find a parent with a table already allocated.
1364*4882a593Smuzhiyun */
1365*4882a593Smuzhiyun for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->table_group;
1366*4882a593Smuzhiyun pdn = pdn->parent) {
1367*4882a593Smuzhiyun dma_window = of_get_property(pdn, "ibm,dma-window", NULL);
1368*4882a593Smuzhiyun if (dma_window)
1369*4882a593Smuzhiyun break;
1370*4882a593Smuzhiyun }
1371*4882a593Smuzhiyun
1372*4882a593Smuzhiyun if (pdn && PCI_DN(pdn)) {
1373*4882a593Smuzhiyun pdev->dev.archdata.dma_offset = enable_ddw(pdev, pdn);
1374*4882a593Smuzhiyun if (pdev->dev.archdata.dma_offset)
1375*4882a593Smuzhiyun return true;
1376*4882a593Smuzhiyun }
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun return false;
1379*4882a593Smuzhiyun }
1380*4882a593Smuzhiyun
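/*
 * Memory hotplug notifier: when memory is added or removed, every direct
 * window must be updated so its TCEs keep covering all of system RAM.
 */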
1381*4882a593Smuzhiyun static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
1382*4882a593Smuzhiyun void *data)
1383*4882a593Smuzhiyun {
1384*4882a593Smuzhiyun struct direct_window *window;
1385*4882a593Smuzhiyun struct memory_notify *arg = data;
1386*4882a593Smuzhiyun int ret = 0;
1387*4882a593Smuzhiyun
1388*4882a593Smuzhiyun switch (action) {
1389*4882a593Smuzhiyun case MEM_GOING_ONLINE:
1390*4882a593Smuzhiyun spin_lock(&direct_window_list_lock);
1391*4882a593Smuzhiyun list_for_each_entry(window, &direct_window_list, list) {
1392*4882a593Smuzhiyun ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
1393*4882a593Smuzhiyun arg->nr_pages, window->prop);
1394*4882a593Smuzhiyun /* XXX log error */
1395*4882a593Smuzhiyun }
1396*4882a593Smuzhiyun spin_unlock(&direct_window_list_lock);
1397*4882a593Smuzhiyun break;
1398*4882a593Smuzhiyun case MEM_CANCEL_ONLINE:
1399*4882a593Smuzhiyun case MEM_OFFLINE:
1400*4882a593Smuzhiyun spin_lock(&direct_window_list_lock);
1401*4882a593Smuzhiyun list_for_each_entry(window, &direct_window_list, list) {
1402*4882a593Smuzhiyun ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
1403*4882a593Smuzhiyun arg->nr_pages, window->prop);
1404*4882a593Smuzhiyun /* XXX log error */
1405*4882a593Smuzhiyun }
1406*4882a593Smuzhiyun spin_unlock(&direct_window_list_lock);
1407*4882a593Smuzhiyun break;
1408*4882a593Smuzhiyun default:
1409*4882a593Smuzhiyun break;
1410*4882a593Smuzhiyun }
1411*4882a593Smuzhiyun if (ret && action != MEM_CANCEL_ONLINE)
1412*4882a593Smuzhiyun return NOTIFY_BAD;
1413*4882a593Smuzhiyun
1414*4882a593Smuzhiyun return NOTIFY_OK;
1415*4882a593Smuzhiyun }
1416*4882a593Smuzhiyun
1417*4882a593Smuzhiyun static struct notifier_block iommu_mem_nb = {
1418*4882a593Smuzhiyun .notifier_call = iommu_mem_notifier,
1419*4882a593Smuzhiyun };
1420*4882a593Smuzhiyun
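/*
 * Device-tree reconfig notifier: when a device node is detached (e.g. on
 * DLPAR removal), tear down its dynamic DMA window, free its IOMMU group
 * and drop it from the direct-window list.
 */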
1421*4882a593Smuzhiyun static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *data)
1422*4882a593Smuzhiyun {
1423*4882a593Smuzhiyun int err = NOTIFY_OK;
1424*4882a593Smuzhiyun struct of_reconfig_data *rd = data;
1425*4882a593Smuzhiyun struct device_node *np = rd->dn;
1426*4882a593Smuzhiyun struct pci_dn *pci = PCI_DN(np);
1427*4882a593Smuzhiyun struct direct_window *window;
1428*4882a593Smuzhiyun
1429*4882a593Smuzhiyun switch (action) {
1430*4882a593Smuzhiyun case OF_RECONFIG_DETACH_NODE:
1431*4882a593Smuzhiyun /*
1432*4882a593Smuzhiyun * Removing the property will invoke the reconfig
1433*4882a593Smuzhiyun * notifier again, which causes a deadlock on the
1434*4882a593Smuzhiyun * read-write semaphore of the notifier chain. So
1435*4882a593Smuzhiyun * we have to remove the property when releasing
1436*4882a593Smuzhiyun * the device node.
1437*4882a593Smuzhiyun */
1438*4882a593Smuzhiyun remove_ddw(np, false);
1439*4882a593Smuzhiyun if (pci && pci->table_group)
1440*4882a593Smuzhiyun iommu_pseries_free_group(pci->table_group,
1441*4882a593Smuzhiyun np->full_name);
1442*4882a593Smuzhiyun
1443*4882a593Smuzhiyun spin_lock(&direct_window_list_lock);
1444*4882a593Smuzhiyun list_for_each_entry(window, &direct_window_list, list) {
1445*4882a593Smuzhiyun if (window->device == np) {
1446*4882a593Smuzhiyun list_del(&window->list);
1447*4882a593Smuzhiyun kfree(window);
1448*4882a593Smuzhiyun break;
1449*4882a593Smuzhiyun }
1450*4882a593Smuzhiyun }
1451*4882a593Smuzhiyun spin_unlock(&direct_window_list_lock);
1452*4882a593Smuzhiyun break;
1453*4882a593Smuzhiyun default:
1454*4882a593Smuzhiyun err = NOTIFY_DONE;
1455*4882a593Smuzhiyun break;
1456*4882a593Smuzhiyun }
1457*4882a593Smuzhiyun return err;
1458*4882a593Smuzhiyun }
1459*4882a593Smuzhiyun
1460*4882a593Smuzhiyun static struct notifier_block iommu_reconfig_nb = {
1461*4882a593Smuzhiyun .notifier_call = iommu_reconfig_notifier,
1462*4882a593Smuzhiyun };
1463*4882a593Smuzhiyun
1464*4882a593Smuzhiyun /* These are called very early. */
1465*4882a593Smuzhiyun void iommu_init_early_pSeries(void)
1466*4882a593Smuzhiyun {
1467*4882a593Smuzhiyun if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL))
1468*4882a593Smuzhiyun return;
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun if (firmware_has_feature(FW_FEATURE_LPAR)) {
1471*4882a593Smuzhiyun pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
1472*4882a593Smuzhiyun pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
1473*4882a593Smuzhiyun if (!disable_ddw)
1474*4882a593Smuzhiyun pseries_pci_controller_ops.iommu_bypass_supported =
1475*4882a593Smuzhiyun iommu_bypass_supported_pSeriesLP;
1476*4882a593Smuzhiyun } else {
1477*4882a593Smuzhiyun pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
1478*4882a593Smuzhiyun pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
1479*4882a593Smuzhiyun }
1480*4882a593Smuzhiyun
1481*4882a593Smuzhiyun
1482*4882a593Smuzhiyun of_reconfig_notifier_register(&iommu_reconfig_nb);
1483*4882a593Smuzhiyun register_memory_notifier(&iommu_mem_nb);
1484*4882a593Smuzhiyun
1485*4882a593Smuzhiyun set_pci_dma_ops(&dma_iommu_ops);
1486*4882a593Smuzhiyun }
1487*4882a593Smuzhiyun
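/*
 * "multitce=off" on the kernel command line disables the firmware features
 * that set or clear multiple TCEs per hypervisor call, so TCEs are then
 * programmed one at a time.
 */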
1488*4882a593Smuzhiyun static int __init disable_multitce(char *str)
1489*4882a593Smuzhiyun {
1490*4882a593Smuzhiyun if (strcmp(str, "off") == 0 &&
1491*4882a593Smuzhiyun firmware_has_feature(FW_FEATURE_LPAR) &&
1492*4882a593Smuzhiyun (firmware_has_feature(FW_FEATURE_PUT_TCE_IND) ||
1493*4882a593Smuzhiyun firmware_has_feature(FW_FEATURE_STUFF_TCE))) {
1494*4882a593Smuzhiyun printk(KERN_INFO "Disabling MULTITCE firmware feature\n");
1495*4882a593Smuzhiyun powerpc_firmware_features &=
1496*4882a593Smuzhiyun ~(FW_FEATURE_PUT_TCE_IND | FW_FEATURE_STUFF_TCE);
1497*4882a593Smuzhiyun }
1498*4882a593Smuzhiyun return 1;
1499*4882a593Smuzhiyun }
1500*4882a593Smuzhiyun
1501*4882a593Smuzhiyun __setup("multitce=", disable_multitce);
1502*4882a593Smuzhiyun
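/*
 * PCI bus notifier: when a device is deleted from the bus, detach it from
 * its IOMMU group.
 */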
1503*4882a593Smuzhiyun static int tce_iommu_bus_notifier(struct notifier_block *nb,
1504*4882a593Smuzhiyun unsigned long action, void *data)
1505*4882a593Smuzhiyun {
1506*4882a593Smuzhiyun struct device *dev = data;
1507*4882a593Smuzhiyun
1508*4882a593Smuzhiyun switch (action) {
1509*4882a593Smuzhiyun case BUS_NOTIFY_DEL_DEVICE:
1510*4882a593Smuzhiyun iommu_del_device(dev);
1511*4882a593Smuzhiyun return 0;
1512*4882a593Smuzhiyun default:
1513*4882a593Smuzhiyun return 0;
1514*4882a593Smuzhiyun }
1515*4882a593Smuzhiyun }
1516*4882a593Smuzhiyun
1517*4882a593Smuzhiyun static struct notifier_block tce_iommu_bus_nb = {
1518*4882a593Smuzhiyun .notifier_call = tce_iommu_bus_notifier,
1519*4882a593Smuzhiyun };
1520*4882a593Smuzhiyun
1521*4882a593Smuzhiyun static int __init tce_iommu_bus_notifier_init(void)
1522*4882a593Smuzhiyun {
1523*4882a593Smuzhiyun bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
1524*4882a593Smuzhiyun return 0;
1525*4882a593Smuzhiyun }
1526*4882a593Smuzhiyun machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);
1527