xref: /OK3568_Linux_fs/kernel/arch/powerpc/platforms/powernv/pci.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Support PCI/PCIe on PowerNV platforms
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/kernel.h>
9*4882a593Smuzhiyun #include <linux/pci.h>
10*4882a593Smuzhiyun #include <linux/delay.h>
11*4882a593Smuzhiyun #include <linux/string.h>
12*4882a593Smuzhiyun #include <linux/init.h>
13*4882a593Smuzhiyun #include <linux/irq.h>
14*4882a593Smuzhiyun #include <linux/io.h>
15*4882a593Smuzhiyun #include <linux/msi.h>
16*4882a593Smuzhiyun #include <linux/iommu.h>
17*4882a593Smuzhiyun #include <linux/sched/mm.h>
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include <asm/sections.h>
20*4882a593Smuzhiyun #include <asm/io.h>
21*4882a593Smuzhiyun #include <asm/prom.h>
22*4882a593Smuzhiyun #include <asm/pci-bridge.h>
23*4882a593Smuzhiyun #include <asm/machdep.h>
24*4882a593Smuzhiyun #include <asm/msi_bitmap.h>
25*4882a593Smuzhiyun #include <asm/ppc-pci.h>
26*4882a593Smuzhiyun #include <asm/pnv-pci.h>
27*4882a593Smuzhiyun #include <asm/opal.h>
28*4882a593Smuzhiyun #include <asm/iommu.h>
29*4882a593Smuzhiyun #include <asm/tce.h>
30*4882a593Smuzhiyun #include <asm/firmware.h>
31*4882a593Smuzhiyun #include <asm/eeh_event.h>
32*4882a593Smuzhiyun #include <asm/eeh.h>
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun #include "powernv.h"
35*4882a593Smuzhiyun #include "pci.h"
36*4882a593Smuzhiyun 
/* NOTE(review): guards tunnel-related PHB state; its users are outside this chunk — confirm against the rest of the file. */
static DEFINE_MUTEX(tunnel_mutex);
38*4882a593Smuzhiyun 
/*
 * pnv_pci_get_slot_id - Resolve the OPAL slot identifier for a DT node.
 * @np: device-tree node of a PCI device/slot
 * @id: output; filled with the OPAL slot id on success
 *
 * Walks up the device tree from @np until it finds an enclosing PHB node
 * (identified by a supported "ibm,*-phb" compatible) and combines that
 * PHB's "ibm,opal-phbid" with the device's bus/devfn taken from @np's
 * "reg" property.
 *
 * Returns 0 on success, -ENXIO if a required property is missing, or
 * -ENODEV if no suitable PHB ancestor is found.
 */
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *node = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	/* "reg" word: bits 8..23 hold the PCI bus/dev/fn triple */
	bdfn = ((bdfn & 0x00ffff00) >> 8);
	/*
	 * NOTE(review): on the first iteration node == np, so the
	 * of_node_put() calls below drop a reference the caller owns;
	 * likewise of_node_put() before `continue` precedes the
	 * of_get_parent(node) in the loop increment. Verify this
	 * refcounting against the callers' expectations.
	 */
	for (node = np; node; node = of_get_parent(node)) {
		if (!PCI_DN(node)) {
			of_node_put(node);
			break;
		}

		if (!of_device_is_compatible(node, "ibm,ioda2-phb") &&
		    !of_device_is_compatible(node, "ibm,ioda3-phb") &&
		    !of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb")) {
			of_node_put(node);
			continue;
		}

		ret = of_property_read_u64(node, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(node);
			return -ENXIO;
		}

		/* opencapi PHBs have no bdfn component in their slot id */
		if (of_device_is_compatible(node, "ibm,ioda2-npu2-opencapi-phb"))
			*id = PCI_PHB_SLOT_ID(phbid);
		else
			*id = PCI_SLOT_ID(phbid, bdfn);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);
80*4882a593Smuzhiyun 
/*
 * pnv_pci_get_device_tree - Fetch a device-tree blob from OPAL firmware.
 * @phandle: phandle identifying the subtree to fetch
 * @buf:     destination buffer
 * @len:     size of @buf in bytes
 *
 * Returns a non-negative OPAL result on success, -ENXIO if the firmware
 * lacks the OPAL_GET_DEVICE_TREE call, or -EIO on firmware failure.
 */
int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
{
	int64_t rc;

	if (!opal_check_token(OPAL_GET_DEVICE_TREE))
		return -ENXIO;

	rc = opal_get_device_tree(phandle, (uint64_t)buf, len);

	return (rc < OPAL_SUCCESS) ? -EIO : rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);
95*4882a593Smuzhiyun 
/*
 * pnv_pci_get_presence_state - Query slot presence state from OPAL.
 * @id:    OPAL slot identifier
 * @state: output; presence state written by firmware
 *
 * Returns 0 on success, -ENXIO if the OPAL call is unavailable,
 * or -EIO on firmware failure.
 */
int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
		return -ENXIO;

	rc = opal_pci_get_presence_state(id, (uint64_t)state);

	return (rc == OPAL_SUCCESS) ? 0 : -EIO;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);
110*4882a593Smuzhiyun 
/*
 * pnv_pci_get_power_state - Query slot power state from OPAL.
 * @id:    OPAL slot identifier
 * @state: output; power state written by firmware
 *
 * Returns 0 on success, -ENXIO if the OPAL call is unavailable,
 * or -EIO on firmware failure.
 */
int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
{
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
		return -ENXIO;

	rc = opal_pci_get_power_state(id, (uint64_t)state);

	return (rc == OPAL_SUCCESS) ? 0 : -EIO;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);
125*4882a593Smuzhiyun 
/*
 * pnv_pci_set_power_state - Change a slot's power state via OPAL.
 * @id:    OPAL slot identifier
 * @state: requested power state value (passed to firmware by address)
 * @msg:   optional; when non-NULL and the call completes asynchronously,
 *         receives the OPAL completion message
 *
 * Returns 0 if the state change completed synchronously, 1 if it
 * completed asynchronously and @msg was filled in, or a negative errno
 * (-ENXIO when the OPAL call is unavailable, -EIO on firmware error,
 * or an error from the async-token machinery).
 */
int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	/* This OPAL call may complete asynchronously: reserve a token first */
	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		/* Completed synchronously; nothing to wait for */
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	/* Firmware went asynchronous: block until the completion message */
	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	if (msg) {
		/* ret == 1 signals the caller that *msg holds the response */
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}

exit:
	/* Always return the async token, on every path */
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
162*4882a593Smuzhiyun 
/*
 * pnv_setup_msi_irqs - Allocate and wire up MSI/MSI-X interrupts for @pdev.
 * @pdev: PCI device requesting MSIs
 * @nvec: number of vectors requested (unused here; each msi_desc entry
 *        of @pdev is processed individually)
 * @type: PCI_CAP_ID_MSI or PCI_CAP_ID_MSIX (unused here, same reason)
 *
 * For every MSI descriptor: allocate a hardware IRQ from the PHB's MSI
 * bitmap, create a Linux virq mapping for it, have the PHB-specific
 * msi_setup() hook compose the MSI message, then bind descriptor and
 * message to the virq.
 *
 * Returns 0 on success or a negative errno. On failure, the hwirq and
 * virq of the failing entry are released here; entries completed before
 * the failure are left for the teardown path.
 */
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	/* No PHB or no MSI bitmap means MSIs are not supported here */
	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	/* Device limited to 32-bit MSI addresses on a 64-bit-only PHB */
	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		/* Grab one hardware MSI from the PHB-wide bitmap */
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		/* PHB-specific hook fills in the MSI address/data message */
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;
}
210*4882a593Smuzhiyun 
/*
 * pnv_teardown_msi_irqs - Release MSI resources set up by
 * pnv_setup_msi_irqs().
 * @pdev: PCI device whose MSIs are being torn down
 *
 * For each mapped MSI descriptor: unbind the descriptor, dispose of the
 * virq mapping, and return the hardware IRQ to the PHB's MSI bitmap.
 */
void pnv_teardown_msi_irqs(struct pci_dev *pdev)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(pdev->bus);
	struct msi_desc *entry;
	irq_hw_number_t hwirq;

	if (WARN_ON(!phb))
		return;

	for_each_pci_msi_entry(entry, pdev) {
		/* Entries that were never mapped have nothing to undo */
		if (!entry->irq)
			continue;
		hwirq = virq_to_hw(entry->irq);
		irq_set_msi_desc(entry->irq, NULL);
		irq_dispose_mapping(entry->irq);
		/* Bitmap is indexed relative to the PHB's MSI base */
		msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
	}
}
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun /* Nicely print the contents of the PE State Tables (PEST). */
pnv_pci_dump_pest(__be64 pestA[],__be64 pestB[],int pest_size)231*4882a593Smuzhiyun static void pnv_pci_dump_pest(__be64 pestA[], __be64 pestB[], int pest_size)
232*4882a593Smuzhiyun {
233*4882a593Smuzhiyun 	__be64 prevA = ULONG_MAX, prevB = ULONG_MAX;
234*4882a593Smuzhiyun 	bool dup = false;
235*4882a593Smuzhiyun 	int i;
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun 	for (i = 0; i < pest_size; i++) {
238*4882a593Smuzhiyun 		__be64 peA = be64_to_cpu(pestA[i]);
239*4882a593Smuzhiyun 		__be64 peB = be64_to_cpu(pestB[i]);
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 		if (peA != prevA || peB != prevB) {
242*4882a593Smuzhiyun 			if (dup) {
243*4882a593Smuzhiyun 				pr_info("PE[..%03x] A/B: as above\n", i-1);
244*4882a593Smuzhiyun 				dup = false;
245*4882a593Smuzhiyun 			}
246*4882a593Smuzhiyun 			prevA = peA;
247*4882a593Smuzhiyun 			prevB = peB;
248*4882a593Smuzhiyun 			if (peA & PNV_IODA_STOPPED_STATE ||
249*4882a593Smuzhiyun 			    peB & PNV_IODA_STOPPED_STATE)
250*4882a593Smuzhiyun 				pr_info("PE[%03x] A/B: %016llx %016llx\n",
251*4882a593Smuzhiyun 					i, peA, peB);
252*4882a593Smuzhiyun 		} else if (!dup && (peA & PNV_IODA_STOPPED_STATE ||
253*4882a593Smuzhiyun 				    peB & PNV_IODA_STOPPED_STATE)) {
254*4882a593Smuzhiyun 			dup = true;
255*4882a593Smuzhiyun 		}
256*4882a593Smuzhiyun 	}
257*4882a593Smuzhiyun }
258*4882a593Smuzhiyun 
/*
 * pnv_pci_dump_p7ioc_diag_data - Pretty-print P7IOC PHB error diag-data.
 * @hose:   PCI controller the diag-data belongs to
 * @common: firmware diag-data buffer (actually OpalIoP7IOCPhbErrorData)
 *
 * Registers are big-endian as delivered by firmware and byte-swapped for
 * display. Only registers with non-zero content are printed, keeping the
 * log compact.
 */
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus   ||
	    data->linkStatus   || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus   || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	/* Finish with the per-PE state tables */
	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_P7IOC_NUM_PEST_REGS);
}
341*4882a593Smuzhiyun 
/*
 * pnv_pci_dump_phb3_diag_data - Pretty-print PHB3 error diag-data.
 * @hose:   PCI controller the diag-data belongs to
 * @common: firmware diag-data buffer (actually OpalIoPhb3ErrorData)
 *
 * Registers are big-endian as delivered by firmware and byte-swapped for
 * display. Only registers with non-zero content are printed.
 */
static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;

	data = (struct OpalIoPhb3ErrorData*)common;
	pr_info("PHB3 PHB#%x Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl:     %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts:      %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));
	if (data->deviceStatus || data->slotStatus   ||
	    data->linkStatus   || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:     %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts:  %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog:  %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));
	if (data->nFir)
		pr_info("nFir:        %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts:      %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem:         %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr:      %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	/* Finish with the per-PE state tables */
	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB3_NUM_PEST_REGS);
}
428*4882a593Smuzhiyun 
/*
 * pnv_pci_dump_phb4_diag_data - Pretty-print PHB4 error diag-data.
 * @hose:   PCI controller the diag-data belongs to
 * @common: firmware diag-data buffer (actually OpalIoPhb4ErrorData)
 *
 * Registers are big-endian as delivered by firmware and byte-swapped for
 * display. Only registers with non-zero content are printed.
 */
static void pnv_pci_dump_phb4_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb4ErrorData *data;

	data = (struct OpalIoPhb4ErrorData*)common;
	pr_info("PHB4 PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));
	if (data->brdgCtl)
		pr_info("brdgCtl:    %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->deviceStatus || data->slotStatus   ||
	    data->linkStatus   || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts:    %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId)
		pr_info("sourceId:   %08x\n", be32_to_cpu(data->sourceId));
	if (data->nFir)
		pr_info("nFir:       %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts:     %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem:        %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));
	if (data->phbErrorStatus)
		pr_info("PhbErr:     %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->phbTxeErrorStatus)
		pr_info("PhbTxeErr:  %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbTxeErrorStatus),
			be64_to_cpu(data->phbTxeFirstErrorStatus),
			be64_to_cpu(data->phbTxeErrorLog0),
			be64_to_cpu(data->phbTxeErrorLog1));
	if (data->phbRxeArbErrorStatus)
		pr_info("RxeArbErr:  %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeArbErrorStatus),
			be64_to_cpu(data->phbRxeArbFirstErrorStatus),
			be64_to_cpu(data->phbRxeArbErrorLog0),
			be64_to_cpu(data->phbRxeArbErrorLog1));
	if (data->phbRxeMrgErrorStatus)
		pr_info("RxeMrgErr:  %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeMrgErrorStatus),
			be64_to_cpu(data->phbRxeMrgFirstErrorStatus),
			be64_to_cpu(data->phbRxeMrgErrorLog0),
			be64_to_cpu(data->phbRxeMrgErrorLog1));
	if (data->phbRxeTceErrorStatus)
		pr_info("RxeTceErr:  %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRxeTceErrorStatus),
			be64_to_cpu(data->phbRxeTceFirstErrorStatus),
			be64_to_cpu(data->phbRxeTceErrorLog0),
			be64_to_cpu(data->phbRxeTceErrorLog1));

	if (data->phbPblErrorStatus)
		pr_info("PblErr:     %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPblErrorStatus),
			be64_to_cpu(data->phbPblFirstErrorStatus),
			be64_to_cpu(data->phbPblErrorLog0),
			be64_to_cpu(data->phbPblErrorLog1));
	if (data->phbPcieDlpErrorStatus)
		pr_info("PcieDlp:    %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbPcieDlpErrorLog1),
			be64_to_cpu(data->phbPcieDlpErrorLog2),
			be64_to_cpu(data->phbPcieDlpErrorStatus));
	if (data->phbRegbErrorStatus)
		pr_info("RegbErr:    %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbRegbErrorStatus),
			be64_to_cpu(data->phbRegbFirstErrorStatus),
			be64_to_cpu(data->phbRegbErrorLog0),
			be64_to_cpu(data->phbRegbErrorLog1));


	/* Finish with the per-PE state tables */
	pnv_pci_dump_pest(data->pestA, data->pestB, OPAL_PHB4_NUM_PEST_REGS);
}
530*4882a593Smuzhiyun 
pnv_pci_dump_phb_diag_data(struct pci_controller * hose,unsigned char * log_buff)531*4882a593Smuzhiyun void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
532*4882a593Smuzhiyun 				unsigned char *log_buff)
533*4882a593Smuzhiyun {
534*4882a593Smuzhiyun 	struct OpalIoPhbErrorCommon *common;
535*4882a593Smuzhiyun 
536*4882a593Smuzhiyun 	if (!hose || !log_buff)
537*4882a593Smuzhiyun 		return;
538*4882a593Smuzhiyun 
539*4882a593Smuzhiyun 	common = (struct OpalIoPhbErrorCommon *)log_buff;
540*4882a593Smuzhiyun 	switch (be32_to_cpu(common->ioType)) {
541*4882a593Smuzhiyun 	case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
542*4882a593Smuzhiyun 		pnv_pci_dump_p7ioc_diag_data(hose, common);
543*4882a593Smuzhiyun 		break;
544*4882a593Smuzhiyun 	case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
545*4882a593Smuzhiyun 		pnv_pci_dump_phb3_diag_data(hose, common);
546*4882a593Smuzhiyun 		break;
547*4882a593Smuzhiyun 	case OPAL_PHB_ERROR_DATA_TYPE_PHB4:
548*4882a593Smuzhiyun 		pnv_pci_dump_phb4_diag_data(hose, common);
549*4882a593Smuzhiyun 		break;
550*4882a593Smuzhiyun 	default:
551*4882a593Smuzhiyun 		pr_warn("%s: Unrecognized ioType %d\n",
552*4882a593Smuzhiyun 			__func__, be32_to_cpu(common->ioType));
553*4882a593Smuzhiyun 	}
554*4882a593Smuzhiyun }
555*4882a593Smuzhiyun 
/*
 * pnv_pci_handle_eeh_config - Respond to an EEH freeze detected on a PE.
 * @phb:   PHB owning the frozen PE
 * @pe_no: number of the PE to unfreeze
 *
 * Snapshots the PHB diag-data, clears the PE's frozen state (via the
 * PHB-specific unfreeze hook when one exists, otherwise directly through
 * OPAL), and dumps the diag-data only if clearing failed. Runs under
 * phb->lock with interrupts disabled.
 */
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag_data,
					 phb->diag_data_size);
	has_diag = (rc == OPAL_SUCCESS);

	/* If PHB supports compound PE, to handle it */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					     pe_no,
					     OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen "
				"PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, let's only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we have
	 * proper EEH support. We need to make sure we don't pollute ourselves
	 * with the normal errors generated when probing empty slots
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag_data);

	spin_unlock_irqrestore(&phb->lock, flags);
}
597*4882a593Smuzhiyun 
pnv_pci_config_check_eeh(struct pci_dn * pdn)598*4882a593Smuzhiyun static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
599*4882a593Smuzhiyun {
600*4882a593Smuzhiyun 	struct pnv_phb *phb = pdn->phb->private_data;
601*4882a593Smuzhiyun 	u8	fstate = 0;
602*4882a593Smuzhiyun 	__be16	pcierr = 0;
603*4882a593Smuzhiyun 	unsigned int pe_no;
604*4882a593Smuzhiyun 	s64	rc;
605*4882a593Smuzhiyun 
606*4882a593Smuzhiyun 	/*
607*4882a593Smuzhiyun 	 * Get the PE#. During the PCI probe stage, we might not
608*4882a593Smuzhiyun 	 * setup that yet. So all ER errors should be mapped to
609*4882a593Smuzhiyun 	 * reserved PE.
610*4882a593Smuzhiyun 	 */
611*4882a593Smuzhiyun 	pe_no = pdn->pe_number;
612*4882a593Smuzhiyun 	if (pe_no == IODA_INVALID_PE) {
613*4882a593Smuzhiyun 		pe_no = phb->ioda.reserved_pe_idx;
614*4882a593Smuzhiyun 	}
615*4882a593Smuzhiyun 
616*4882a593Smuzhiyun 	/*
617*4882a593Smuzhiyun 	 * Fetch frozen state. If the PHB support compound PE,
618*4882a593Smuzhiyun 	 * we need handle that case.
619*4882a593Smuzhiyun 	 */
620*4882a593Smuzhiyun 	if (phb->get_pe_state) {
621*4882a593Smuzhiyun 		fstate = phb->get_pe_state(phb, pe_no);
622*4882a593Smuzhiyun 	} else {
623*4882a593Smuzhiyun 		rc = opal_pci_eeh_freeze_status(phb->opal_id,
624*4882a593Smuzhiyun 						pe_no,
625*4882a593Smuzhiyun 						&fstate,
626*4882a593Smuzhiyun 						&pcierr,
627*4882a593Smuzhiyun 						NULL);
628*4882a593Smuzhiyun 		if (rc) {
629*4882a593Smuzhiyun 			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
630*4882a593Smuzhiyun 				__func__, rc, phb->hose->global_number, pe_no);
631*4882a593Smuzhiyun 			return;
632*4882a593Smuzhiyun 		}
633*4882a593Smuzhiyun 	}
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun 	pr_devel(" -> EEH check, bdfn=%04x PE#%x fstate=%x\n",
636*4882a593Smuzhiyun 		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun 	/* Clear the frozen state if applicable */
639*4882a593Smuzhiyun 	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
640*4882a593Smuzhiyun 	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE  ||
641*4882a593Smuzhiyun 	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
642*4882a593Smuzhiyun 		/*
643*4882a593Smuzhiyun 		 * If PHB supports compound PE, freeze it for
644*4882a593Smuzhiyun 		 * consistency.
645*4882a593Smuzhiyun 		 */
646*4882a593Smuzhiyun 		if (phb->freeze_pe)
647*4882a593Smuzhiyun 			phb->freeze_pe(phb, pe_no);
648*4882a593Smuzhiyun 
649*4882a593Smuzhiyun 		pnv_pci_handle_eeh_config(phb, pe_no);
650*4882a593Smuzhiyun 	}
651*4882a593Smuzhiyun }
652*4882a593Smuzhiyun 
/*
 * Read @size bytes of config space at offset @where for the device
 * described by @pdn, going through OPAL. On OPAL failure the result
 * is all-ones, matching PCI master-abort semantics.
 */
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	__be32 v32;
	__be16 v16;
	u8 v8;
	s64 rc;

	switch (size) {
	case 1:
		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	case 2:
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	case 4:
		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}
688*4882a593Smuzhiyun 
/*
 * Write @size bytes of config space at offset @where for the device
 * described by @pdn via OPAL. OPAL errors are not propagated; only an
 * unsupported access size is reported to the caller.
 */
int pnv_pci_cfg_write(struct pci_dn *pdn,
		      int where, int size, u32 val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, val);

	if (size == 1)
		opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
	else if (size == 2)
		opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
	else if (size == 4)
		opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
	else
		return PCIBIOS_FUNC_NOT_SUPPORTED;

	return PCIBIOS_SUCCESSFUL;
}
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun #if CONFIG_EEH
pnv_pci_cfg_check(struct pci_dn * pdn)715*4882a593Smuzhiyun static bool pnv_pci_cfg_check(struct pci_dn *pdn)
716*4882a593Smuzhiyun {
717*4882a593Smuzhiyun 	struct eeh_dev *edev = NULL;
718*4882a593Smuzhiyun 	struct pnv_phb *phb = pdn->phb->private_data;
719*4882a593Smuzhiyun 
720*4882a593Smuzhiyun 	/* EEH not enabled ? */
721*4882a593Smuzhiyun 	if (!(phb->flags & PNV_PHB_FLAG_EEH))
722*4882a593Smuzhiyun 		return true;
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 	/* PE reset or device removed ? */
725*4882a593Smuzhiyun 	edev = pdn->edev;
726*4882a593Smuzhiyun 	if (edev) {
727*4882a593Smuzhiyun 		if (edev->pe &&
728*4882a593Smuzhiyun 		    (edev->pe->state & EEH_PE_CFG_BLOCKED))
729*4882a593Smuzhiyun 			return false;
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 		if (edev->mode & EEH_DEV_REMOVED)
732*4882a593Smuzhiyun 			return false;
733*4882a593Smuzhiyun 	}
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun 	return true;
736*4882a593Smuzhiyun }
737*4882a593Smuzhiyun #else
pnv_pci_cfg_check(struct pci_dn * pdn)738*4882a593Smuzhiyun static inline pnv_pci_cfg_check(struct pci_dn *pdn)
739*4882a593Smuzhiyun {
740*4882a593Smuzhiyun 	return true;
741*4882a593Smuzhiyun }
742*4882a593Smuzhiyun #endif /* CONFIG_EEH */
743*4882a593Smuzhiyun 
/*
 * pci_ops .read hook: perform the config read through OPAL, then run
 * the appropriate EEH failure check depending on whether the PHB has
 * native EEH enabled.
 */
static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	int ret;

	*val = 0xFFFFFFFF;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn || !pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if ((phb->flags & PNV_PHB_FLAG_EEH) && pdn->edev) {
		/* An all-ones read may indicate a frozen PE; let EEH decide */
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}
772*4882a593Smuzhiyun 
/*
 * pci_ops .write hook: perform the config write through OPAL. When the
 * PHB lacks native EEH, poll for a freeze that the write may have
 * triggered.
 */
static int pnv_pci_write_config(struct pci_bus *bus,
				unsigned int devfn,
				int where, int size, u32 val)
{
	struct pnv_phb *phb;
	struct pci_dn *pdn;
	int ret;

	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn || !pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_write(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (!(phb->flags & PNV_PHB_FLAG_EEH))
		pnv_pci_config_check_eeh(pdn);

	return ret;
}
795*4882a593Smuzhiyun 
/*
 * Config-space accessors shared by all PowerNV PHBs; accesses are
 * routed through OPAL with EEH checking layered on top.
 */
struct pci_ops pnv_pci_ops = {
	.read  = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};
800*4882a593Smuzhiyun 
pnv_pci_table_alloc(int nid)801*4882a593Smuzhiyun struct iommu_table *pnv_pci_table_alloc(int nid)
802*4882a593Smuzhiyun {
803*4882a593Smuzhiyun 	struct iommu_table *tbl;
804*4882a593Smuzhiyun 
805*4882a593Smuzhiyun 	tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
806*4882a593Smuzhiyun 	if (!tbl)
807*4882a593Smuzhiyun 		return NULL;
808*4882a593Smuzhiyun 
809*4882a593Smuzhiyun 	INIT_LIST_HEAD_RCU(&tbl->it_group_list);
810*4882a593Smuzhiyun 	kref_init(&tbl->it_kref);
811*4882a593Smuzhiyun 
812*4882a593Smuzhiyun 	return tbl;
813*4882a593Smuzhiyun }
814*4882a593Smuzhiyun 
pnv_pci_get_phb_node(struct pci_dev * dev)815*4882a593Smuzhiyun struct device_node *pnv_pci_get_phb_node(struct pci_dev *dev)
816*4882a593Smuzhiyun {
817*4882a593Smuzhiyun 	struct pci_controller *hose = pci_bus_to_host(dev->bus);
818*4882a593Smuzhiyun 
819*4882a593Smuzhiyun 	return of_node_get(hose->dn);
820*4882a593Smuzhiyun }
821*4882a593Smuzhiyun EXPORT_SYMBOL(pnv_pci_get_phb_node);
822*4882a593Smuzhiyun 
/*
 * Claim (@enable != 0) or release (@enable == 0) the PBCQ tunnel BAR
 * of @dev's PHB for address @addr. The BAR enables PCIe atomics for a
 * single device per PHB.
 *
 * Returns 0 on success, -ENXIO when the firmware lacks the calls,
 * -EBUSY when another device owns the BAR, -EPERM when releasing with
 * a mismatched address, or a translated OPAL error.
 */
int pnv_pci_set_tunnel_bar(struct pci_dev *dev, u64 addr, int enable)
{
	struct pnv_phb *phb = pci_bus_to_pnvhb(dev->bus);
	u64 tunnel_bar;
	__be64 val;
	int rc;

	/* Both the get and set firmware calls must exist */
	if (!opal_check_token(OPAL_PCI_GET_PBCQ_TUNNEL_BAR) ||
	    !opal_check_token(OPAL_PCI_SET_PBCQ_TUNNEL_BAR))
		return -ENXIO;

	mutex_lock(&tunnel_mutex);
	rc = opal_pci_get_pbcq_tunnel_bar(phb->opal_id, &val);
	if (rc != OPAL_SUCCESS) {
		rc = -EIO;
		goto out;
	}
	tunnel_bar = be64_to_cpu(val);

	if (enable) {
		/*
		* Only one device per PHB can use atomics.
		* Our policy is first-come, first-served.
		*/
		if (tunnel_bar) {
			/* Setting the same address twice is ok */
			rc = (tunnel_bar == addr) ? 0 : -EBUSY;
			goto out;
		}
	} else {
		/*
		* The device that owns atomics and wants to release
		* them must pass the same address with enable == 0.
		*/
		if (tunnel_bar != addr) {
			rc = -EPERM;
			goto out;
		}
		addr = 0x0ULL;
	}

	rc = opal_error_code(opal_pci_set_pbcq_tunnel_bar(phb->opal_id, addr));
out:
	mutex_unlock(&tunnel_mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_tunnel_bar);
872*4882a593Smuzhiyun 
pnv_pci_shutdown(void)873*4882a593Smuzhiyun void pnv_pci_shutdown(void)
874*4882a593Smuzhiyun {
875*4882a593Smuzhiyun 	struct pci_controller *hose;
876*4882a593Smuzhiyun 
877*4882a593Smuzhiyun 	list_for_each_entry(hose, &hose_list, list_node)
878*4882a593Smuzhiyun 		if (hose->controller_ops.shutdown)
879*4882a593Smuzhiyun 			hose->controller_ops.shutdown(hose);
880*4882a593Smuzhiyun }
881*4882a593Smuzhiyun 
/* Fixup wrong class code in p7ioc and p8 root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	/* Force the class to PCI-to-PCI bridge so the core treats it as one */
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
888*4882a593Smuzhiyun 
/*
 * Boot-time PCI setup for PowerNV: walks the device tree for every
 * known PHB flavour (IODA hub, PHB3/PHB4, NPU, OpenCAPI), initialises
 * each, and installs the IOMMU DMA ops. Bails out early when OPAL
 * firmware is absent (e.g. under simulation).
 */
void __init pnv_pci_init(void)
{
	struct device_node *np;

	pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);

	/* If we don't have OPAL, eg. in sim, just skip PCI probe */
	if (!firmware_has_feature(FW_FEATURE_OPAL))
		return;

#ifdef CONFIG_PCIEPORTBUS
	/*
	 * On PowerNV PCIe devices are (currently) managed in cooperation
	 * with firmware. This isn't *strictly* required, but there's enough
	 * assumptions baked into both firmware and the platform code that
	 * it's unwise to allow the portbus services to be used.
	 *
	 * We need to fix this eventually, but for now set this flag to disable
	 * the portbus driver. The AER service isn't required since that AER
	 * events are handled via EEH. The pciehp hotplug driver can't work
	 * without kernel changes (and portbus binding breaks pnv_php). The
	 * other services also require some thinking about how we're going
	 * to integrate them.
	 */
	pcie_ports_disabled = true;
#endif

	/* Look for IODA IO-Hubs. */
	for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
		pnv_pci_init_ioda_hub(np);
	}

	/* Look for ioda2 built-in PHB3's */
	for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
	for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
		pnv_pci_init_ioda2_phb(np);

	/* Look for NPU PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
		pnv_pci_init_npu_phb(np);

	/*
	 * Look for NPU2 PHBs which we treat mostly as NPU PHBs with
	 * the exception of TCE kill which requires an OPAL call.
	 */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-phb")
		pnv_pci_init_npu_phb(np);

	/* Look for NPU2 OpenCAPI PHBs */
	for_each_compatible_node(np, NULL, "ibm,ioda2-npu2-opencapi-phb")
		pnv_pci_init_npu2_opencapi_phb(np);

	/* Configure IOMMU DMA hooks */
	set_pci_dma_ops(&dma_iommu_ops);
}
947*4882a593Smuzhiyun 
pnv_tce_iommu_bus_notifier(struct notifier_block * nb,unsigned long action,void * data)948*4882a593Smuzhiyun static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
949*4882a593Smuzhiyun 		unsigned long action, void *data)
950*4882a593Smuzhiyun {
951*4882a593Smuzhiyun 	struct device *dev = data;
952*4882a593Smuzhiyun 
953*4882a593Smuzhiyun 	switch (action) {
954*4882a593Smuzhiyun 	case BUS_NOTIFY_DEL_DEVICE:
955*4882a593Smuzhiyun 		iommu_del_device(dev);
956*4882a593Smuzhiyun 		return 0;
957*4882a593Smuzhiyun 	default:
958*4882a593Smuzhiyun 		return 0;
959*4882a593Smuzhiyun 	}
960*4882a593Smuzhiyun }
961*4882a593Smuzhiyun 
/* Notifier that removes departing PCI devices from their IOMMU group */
static struct notifier_block pnv_tce_iommu_bus_nb = {
	.notifier_call = pnv_tce_iommu_bus_notifier,
};
965*4882a593Smuzhiyun 
/*
 * Register the IOMMU bus notifier on the PCI bus type. Runs once at
 * subsys initcall time on powernv machines.
 */
static int __init pnv_tce_iommu_bus_notifier_init(void)
{
	bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
	return 0;
}
machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);
972