// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * This file implements the platform-dependent EEH operations for pseries.
 * The pseries platform is built heavily on RTAS, so the platform-dependent
 * EEH operations are implemented as RTAS calls. The functions are derived
 * from arch/powerpc/platforms/pseries/eeh.c, with the necessary cleanup
 * applied.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/crash_dump.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_pe;
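
/*
 * All tokens above are resolved in eeh_pseries_init(); a token that
 * remains RTAS_UNKNOWN_SERVICE after that means the firmware does not
 * implement the corresponding call, which callers check before
 * invoking RTAS.
 */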

void pseries_pcibios_bus_add_device(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn(pdev);

	if (eeh_has_flag(EEH_FORCE_DISABLED))
		return;

	dev_dbg(&pdev->dev, "EEH: Setting up device\n");
#ifdef CONFIG_PCI_IOV
	if (pdev->is_virtfn) {
		pdn->device_id = pdev->device;
		pdn->vendor_id = pdev->vendor;
		pdn->class_code = pdev->class;
		/*
		 * The last allow-unfreeze return code is kept for
		 * retrieval by user space via eeh-sysfs, to show the
		 * last command completion from the platform.
		 */
		pdn->last_allow_rc = 0;
	}
#endif
	pseries_eeh_init_edev(pdn);
#ifdef CONFIG_PCI_IOV
	if (pdev->is_virtfn) {
		/*
		 * FIXME: This really should be handled by choosing the right
		 *        parent PE in pseries_eeh_init_edev().
		 */
		struct eeh_pe *physfn_pe = pci_dev_to_eeh_dev(pdev->physfn)->pe;
		struct eeh_dev *edev = pdn_to_eeh_dev(pdn);

		edev->pe_config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
		eeh_pe_tree_remove(edev); /* Remove as it is adding to bus pe */
		eeh_pe_tree_insert(edev, physfn_pe); /* Add as VF PE type */
	}
#endif
	eeh_probe_device(pdev);
}


/**
 * pseries_eeh_get_pe_config_addr - Find the pe_config_addr for a device
 * @pdn: pci_dn of the input device
 *
 * The EEH RTAS calls use a tuple consisting of: (buid_hi, buid_lo,
 * pe_config_addr) as a handle to a given PE. This function finds the
 * pe_config_addr based on the device's config addr.
 *
 * Keep in mind that the pe_config_addr *might* be numerically identical to the
 * device's config addr, but the two are conceptually distinct.
 *
 * Returns the pe_config_addr, or a negative error code.
 */
static int pseries_eeh_get_pe_config_addr(struct pci_dn *pdn)
{
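	/* Register-format config address: bus in bits 23:16, devfn in bits 15:8 */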
	int config_addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	struct pci_controller *phb = pdn->phb;
	int ret, rets[3];

	if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
		/*
		 * First of all, use function 1 to determine if this device is
		 * part of a PE or not. ret[0] being zero indicates it's not.
		 */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				config_addr, BUID_HI(phb->buid),
				BUID_LO(phb->buid), 1);
		if (ret || (rets[0] == 0))
			return -ENOENT;

		/* Retrieve the associated PE config address with function 0 */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				config_addr, BUID_HI(phb->buid),
				BUID_LO(phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, phb->global_number, config_addr);
			return -ENXIO;
		}

		return rets[0];
	}

	if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
				config_addr, BUID_HI(phb->buid),
				BUID_LO(phb->buid), 0);
		if (ret) {
			pr_warn("%s: Failed to get address for PHB#%x-PE#%x\n",
				__func__, phb->global_number, config_addr);
			return -ENXIO;
		}

		return rets[0];
	}

	/*
	 * PAPR does describe a process for finding the pe_config_addr that was
	 * used before the ibm,get-config-addr-info calls were added. However,
	 * I haven't found *any* systems that don't have that RTAS call
	 * implemented. If you happen to find one that needs the old DT based
	 * process, patches are welcome!
	 */
	return -ENOENT;
}

/**
 * pseries_eeh_phb_reset - Reset the specified PHB
 * @phb: PCI controller
 * @config_addr: the associated config address
 * @option: reset option
 *
 * Reset the specified PHB/PE
 */
static int pseries_eeh_phb_reset(struct pci_controller *phb, int config_addr, int option)
{
	int ret;

	/* Reset PE through RTAS call */
	ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
			config_addr, BUID_HI(phb->buid),
			BUID_LO(phb->buid), option);

	/* If fundamental-reset not supported, try hot-reset */
	if (option == EEH_RESET_FUNDAMENTAL && ret == -8) {
		option = EEH_RESET_HOT;
		ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
				config_addr, BUID_HI(phb->buid),
				BUID_LO(phb->buid), option);
	}

	/* We need reset hold or settlement delay */
	if (option == EEH_RESET_FUNDAMENTAL || option == EEH_RESET_HOT)
		msleep(EEH_PE_RST_HOLD_TIME);
	else
		msleep(EEH_PE_RST_SETTLE_TIME);

	return ret;
}

/**
 * pseries_eeh_phb_configure_bridge - Configure PCI bridges in the indicated PE
 * @phb: PCI controller
 * @config_addr: the associated config address
 *
 * The function is called to reconfigure the bridges included in the
 * specified PE so that the malfunctioning PE can be recovered.
 */
static int pseries_eeh_phb_configure_bridge(struct pci_controller *phb, int config_addr)
{
	int ret;
	/* Waiting 0.2s maximum before skipping configuration */
	int max_wait = 200;

	while (max_wait > 0) {
		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
				config_addr, BUID_HI(phb->buid),
				BUID_LO(phb->buid));

		if (!ret)
			return ret;
		if (ret < 0)
			break;

		/*
		 * If RTAS returns a delay value that's above 100ms, cut it
		 * down to 100ms in case firmware made a mistake. For more
		 * on how these delay values work see rtas_busy_delay_time().
		 */
		if (ret > RTAS_EXTENDED_DELAY_MIN+2 &&
		    ret <= RTAS_EXTENDED_DELAY_MAX)
			ret = RTAS_EXTENDED_DELAY_MIN+2;

		max_wait -= rtas_busy_delay_time(ret);

		if (max_wait < 0)
			break;

		rtas_busy_delay(ret);
	}

	pr_warn("%s: Unable to configure bridge PHB#%x-PE#%x (%d)\n",
		__func__, phb->global_number, config_addr, ret);
	/* PAPR defines -3 as "Parameter Error" for this function: */
	if (ret == -3)
		return -EINVAL;
	else
		return -EIO;
}

/*
 * Buffer for reporting slot-error-detail rtas calls. It's here
 * in BSS, and not dynamically allocated, so that it ends up in
 * RMO where RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;

static int pseries_eeh_cap_start(struct pci_dn *pdn)
{
	u32 status;

	if (!pdn)
		return 0;

	rtas_read_config(pdn, PCI_STATUS, 2, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	return PCI_CAPABILITY_LIST;
}


static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
{
	int pos = pseries_eeh_cap_start(pdn);
	int cnt = 48;	/* Maximal number of capabilities */
	u32 id;

	if (!pos)
		return 0;

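	/*
	 * Walk the standard capability list: each entry holds an 8-bit
	 * capability ID followed by an 8-bit next pointer, and must live
	 * at or above offset 0x40 in config space.
	 */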
	while (cnt--) {
		rtas_read_config(pdn, pos, 1, &pos);
		if (pos < 0x40)
			break;
		pos &= ~3;
		rtas_read_config(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos += PCI_CAP_LIST_NEXT;
	}

	return 0;
}

static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
	struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
	u32 header;
	int pos = 256;
	int ttl = (4096 - 256) / 8;

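	/*
	 * Extended config space starts at offset 256; each capability is
	 * at least 8 bytes, which bounds the walk (ttl) and guards
	 * against malformed next pointers looping forever.
	 */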
	if (!edev || !edev->pcie_cap)
		return 0;
	if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
		return 0;
	else if (!header)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < 256)
			break;

		if (rtas_read_config(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}

/**
 * pseries_eeh_pe_get_parent - Retrieve the parent PE
 * @edev: EEH device
 *
 * All PEs in the system are organized as a hierarchy tree. This
 * function retrieves the parent PE by walking up through the parent
 * EEH devices.
 */
static struct eeh_pe *pseries_eeh_pe_get_parent(struct eeh_dev *edev)
{
	struct eeh_dev *parent;
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	/*
	 * An indirect parent EEH device may already have an associated
	 * PE while the direct parent EEH device doesn't have one yet,
	 * so walk upwards until a PE is found.
	 */
	if (edev->physfn)
		pdn = pci_get_pdn(edev->physfn);
	else
		pdn = pdn ? pdn->parent : NULL;
	while (pdn) {
		/* We're poking out of PCI territory */
		parent = pdn_to_eeh_dev(pdn);
		if (!parent)
			return NULL;

		if (parent->pe)
			return parent->pe;

		pdn = pdn->parent;
	}

	return NULL;
}

/**
 * pseries_eeh_init_edev - initialise the eeh_dev and eeh_pe for a pci_dn
 *
 * @pdn: PCI device node
 *
 * When we discover a new PCI device via the device-tree we create a
 * corresponding pci_dn and we allocate, but don't initialise, an eeh_dev.
 * This function takes care of the initialisation and inserts the eeh_dev
 * into the correct eeh_pe. If no eeh_pe exists we'll allocate one.
 */
void pseries_eeh_init_edev(struct pci_dn *pdn)
{
	struct eeh_pe pe, *parent;
	struct eeh_dev *edev;
	u32 pcie_flags;
	int ret;

	if (WARN_ON_ONCE(!eeh_has_flag(EEH_PROBE_MODE_DEVTREE)))
		return;

	/*
	 * Find the eeh_dev for this pdn. The storage for the eeh_dev was
	 * allocated at the same time as the pci_dn.
	 *
	 * XXX: We should probably re-visit that.
	 */
	edev = pdn_to_eeh_dev(pdn);
	if (!edev)
		return;

	/*
	 * If ->pe is set then we've already probed this device. We hit
	 * this path when a pci_dev is removed and rescanned while recovering
	 * a PE (i.e. for devices where the driver doesn't support error
	 * recovery).
	 */
	if (edev->pe)
		return;

	/* Check class/vendor/device IDs */
	if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
		return;

	/* Skip for PCI-ISA bridge */
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
		return;

	eeh_edev_dbg(edev, "Probing device\n");

	/*
	 * Update the class code and mode of the eeh device, so that it
	 * correctly reflects whether the current device is a root port
	 * or a PCIe switch downstream port.
	 */
	edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
	edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
	edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
	edev->mode &= 0xFFFFFF00;
	if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
		edev->mode |= EEH_DEV_BRIDGE;
		if (edev->pcie_cap) {
			rtas_read_config(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
					 2, &pcie_flags);
			pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
			if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
				edev->mode |= EEH_DEV_ROOT_PORT;
			else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
				edev->mode |= EEH_DEV_DS_PORT;
		}
	}

	/* First up, find the pe_config_addr for the PE containing the device */
	ret = pseries_eeh_get_pe_config_addr(pdn);
	if (ret < 0) {
		eeh_edev_dbg(edev, "Unable to find pe_config_addr\n");
		goto err;
	}

	/* Try to enable EEH on the fake PE */
	memset(&pe, 0, sizeof(struct eeh_pe));
	pe.phb = pdn->phb;
	pe.addr = ret;

	eeh_edev_dbg(edev, "Enabling EEH on device\n");
	ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
	if (ret) {
		eeh_edev_dbg(edev, "EEH failed to enable on device (code %d)\n", ret);
		goto err;
	}

	edev->pe_config_addr = pe.addr;

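	/* EEH is usable on at least one device, so enable it globally */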
	eeh_add_flag(EEH_ENABLED);

	parent = pseries_eeh_pe_get_parent(edev);
	eeh_pe_tree_insert(edev, parent);
	eeh_save_bars(edev);
	eeh_edev_dbg(edev, "EEH enabled for device");

	return;

err:
	eeh_edev_dbg(edev, "EEH is unsupported on device (code = %d)\n", ret);
}

static struct eeh_dev *pseries_eeh_probe(struct pci_dev *pdev)
{
	struct eeh_dev *edev;
	struct pci_dn *pdn;

	pdn = pci_get_pdn_by_devfn(pdev->bus, pdev->devfn);
	if (!pdn)
		return NULL;

	/*
	 * If the system supports EEH on this device then the eeh_dev was
	 * configured and inserted into a PE in pseries_eeh_init_edev()
	 */
	edev = pdn_to_eeh_dev(pdn);
	if (!edev || !edev->pe)
		return NULL;

	return edev;
}

/**
 * pseries_eeh_init_edev_recursive - Enable EEH for the indicated device
 * @pdn: PCI device node
 *
 * This routine must be used to perform EEH initialization for the
 * indicated PCI device that was added after system boot (e.g.
 * hotplug, dlpar).
 */
void pseries_eeh_init_edev_recursive(struct pci_dn *pdn)
{
	struct pci_dn *n;

	if (!pdn)
		return;

	list_for_each_entry(n, &pdn->child_list, list)
		pseries_eeh_init_edev_recursive(n);

	pseries_eeh_init_edev(pdn);
}
EXPORT_SYMBOL_GPL(pseries_eeh_init_edev_recursive);

/**
 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @pe: EEH PE
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pseries_eeh_set_option(struct eeh_pe *pe, int option)
{
	int ret = 0;

	/*
	 * When we're enabling or disabling EEH functionality on
	 * the particular PE, the PE config address is possibly
	 * unavailable. Therefore, we have to figure it out from
	 * the FDT node.
	 */
	switch (option) {
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
		break;
	case EEH_OPT_FREEZE_PE:
		/* Not supported */
		return 0;
	default:
		pr_err("%s: Invalid option %d\n", __func__, option);
		return -EINVAL;
	}

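	/* Apply the option to the PE identified by (addr, buid_hi, buid_lo) */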
	ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
			pe->addr, BUID_HI(pe->phb->buid),
			BUID_LO(pe->phb->buid), option);

	return ret;
}

/**
 * pseries_eeh_get_state - Retrieve PE state
 * @pe: EEH PE
 * @delay: suggested time to wait if state is unavailable
 *
 * Retrieve the state of the specified PE. An RTAS-compliant pseries
 * platform provides a dedicated RTAS call for this purpose. Note that
 * the associated PE config address might already be available here, so
 * use it where possible. Furthermore, there are two RTAS calls for the
 * purpose; try the new one first and fall back to the old one if the
 * new one doesn't work properly.
 */
static int pseries_eeh_get_state(struct eeh_pe *pe, int *delay)
{
	int ret;
	int rets[4];
	int result;

	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
				pe->addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
		/* Fake PE unavailable info */
		rets[2] = 0;
		ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
				pe->addr, BUID_HI(pe->phb->buid),
				BUID_LO(pe->phb->buid));
	} else {
		return EEH_STATE_NOT_SUPPORT;
	}

	if (ret)
		return ret;

	/* Parse the result out */
	if (!rets[1])
		return EEH_STATE_NOT_SUPPORT;

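	/*
	 * rets[0] carries the slot state: 0 - normal, 1 - reset active,
	 * 2 - frozen (MMIO and DMA disabled), 4 - MMIO enabled but DMA
	 * still disabled, 5 - state temporarily unavailable (retry
	 * after rets[2] milliseconds).
	 */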
	switch (rets[0]) {
	case 0:
		result = EEH_STATE_MMIO_ACTIVE |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 1:
		result = EEH_STATE_RESET_ACTIVE |
			 EEH_STATE_MMIO_ACTIVE |
			 EEH_STATE_DMA_ACTIVE;
		break;
	case 2:
		result = 0;
		break;
	case 4:
		result = EEH_STATE_MMIO_ENABLED;
		break;
	case 5:
		if (rets[2]) {
			if (delay)
				*delay = rets[2];
			result = EEH_STATE_UNAVAILABLE;
		} else {
			result = EEH_STATE_NOT_SUPPORT;
		}
		break;
	default:
		result = EEH_STATE_NOT_SUPPORT;
	}

	return result;
}

/**
 * pseries_eeh_reset - Reset the specified PE
 * @pe: EEH PE
 * @option: reset option
 *
 * Reset the specified PE
 */
static int pseries_eeh_reset(struct eeh_pe *pe, int option)
{
	return pseries_eeh_phb_reset(pe->phb, pe->addr, option);
}

/**
 * pseries_eeh_get_log - Retrieve error log
 * @pe: EEH PE
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error from the PE.
 * The error is retrieved through the dedicated RTAS call.
 */
static int pseries_eeh_get_log(struct eeh_pe *pe, int severity, char *drv_log, unsigned long len)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&slot_errbuf_lock, flags);
	memset(slot_errbuf, 0, eeh_error_buf_size);

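	/*
	 * RTAS runs in real mode, so both log buffers are passed by
	 * physical address; slot_errbuf lives in BSS so that it ends up
	 * in the RMO region RTAS can reach.
	 */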
	ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, pe->addr,
			BUID_HI(pe->phb->buid), BUID_LO(pe->phb->buid),
			virt_to_phys(drv_log), len,
			virt_to_phys(slot_errbuf), eeh_error_buf_size,
			severity);
	if (!ret)
		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
	spin_unlock_irqrestore(&slot_errbuf_lock, flags);

	return ret;
}

/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @pe: EEH PE
 *
 */
static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
{
	return pseries_eeh_phb_configure_bridge(pe->phb, pe->addr);
}

/**
 * pseries_eeh_read_config - Read PCI config space
 * @edev: EEH device handle
 * @where: PCI config space offset
 * @size: size to read
 * @val: return value
 *
 * Read config space from the specified device
 */
static int pseries_eeh_read_config(struct eeh_dev *edev, int where, int size, u32 *val)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	return rtas_read_config(pdn, where, size, val);
}

/**
 * pseries_eeh_write_config - Write PCI config space
 * @edev: EEH device handle
 * @where: PCI config space offset
 * @size: size to write
 * @val: value to be written
 *
 * Write config space to the specified device
 */
static int pseries_eeh_write_config(struct eeh_dev *edev, int where, int size, u32 val)
{
	struct pci_dn *pdn = eeh_dev_to_pdn(edev);

	return rtas_write_config(pdn, where, size, val);
}

#ifdef CONFIG_PCI_IOV
int pseries_send_allow_unfreeze(struct pci_dn *pdn,
				u16 *vf_pe_array, int cur_vfs)
{
	int rc;
	int ibm_allow_unfreeze = rtas_token("ibm,open-sriov-allow-unfreeze");
	unsigned long buid, addr;

	addr = rtas_config_addr(pdn->busno, pdn->devfn, 0);
	buid = pdn->phb->buid;
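	/*
	 * rtas_data_buf is a shared, RTAS-addressable bounce buffer; the
	 * lock serializes all of its users while the VF PE number array
	 * is handed to firmware.
	 */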
	spin_lock(&rtas_data_buf_lock);
	memcpy(rtas_data_buf, vf_pe_array, RTAS_DATA_BUF_SIZE);
	rc = rtas_call(ibm_allow_unfreeze, 5, 1, NULL,
		       addr,
		       BUID_HI(buid),
		       BUID_LO(buid),
		       rtas_data_buf, cur_vfs * sizeof(u16));
	spin_unlock(&rtas_data_buf_lock);
	if (rc)
		pr_warn("%s: Failed to allow unfreeze for PHB#%x-PE#%lx, rc=%x\n",
			__func__,
			pdn->phb->global_number, addr, rc);
	return rc;
}

static int pseries_call_allow_unfreeze(struct eeh_dev *edev)
{
	int cur_vfs = 0, rc = 0, vf_index, bus, devfn, vf_pe_num;
	struct pci_dn *pdn, *tmp, *parent, *physfn_pdn;
	u16 *vf_pe_array;

	vf_pe_array = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL);
	if (!vf_pe_array)
		return -ENOMEM;
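	/*
	 * Two cases below: a PF unfreezes the PEs of all of its VFs in a
	 * single call, while a VF asks its parent PF to unfreeze just
	 * its own PE.
	 */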
	if (pci_num_vf(edev->physfn ? edev->physfn : edev->pdev)) {
		if (edev->pdev->is_physfn) {
			cur_vfs = pci_num_vf(edev->pdev);
			pdn = eeh_dev_to_pdn(edev);
			parent = pdn->parent;
			for (vf_index = 0; vf_index < cur_vfs; vf_index++)
				vf_pe_array[vf_index] =
					cpu_to_be16(pdn->pe_num_map[vf_index]);
			rc = pseries_send_allow_unfreeze(pdn, vf_pe_array,
							 cur_vfs);
			pdn->last_allow_rc = rc;
			for (vf_index = 0; vf_index < cur_vfs; vf_index++) {
				list_for_each_entry_safe(pdn, tmp,
							 &parent->child_list,
							 list) {
					bus = pci_iov_virtfn_bus(edev->pdev,
								 vf_index);
					devfn = pci_iov_virtfn_devfn(edev->pdev,
								     vf_index);
					if (pdn->busno != bus ||
					    pdn->devfn != devfn)
						continue;
					pdn->last_allow_rc = rc;
				}
			}
		} else {
			pdn = pci_get_pdn(edev->pdev);
			physfn_pdn = pci_get_pdn(edev->physfn);

			vf_pe_num = physfn_pdn->pe_num_map[edev->vf_index];
			vf_pe_array[0] = cpu_to_be16(vf_pe_num);
			rc = pseries_send_allow_unfreeze(physfn_pdn,
							 vf_pe_array, 1);
			pdn->last_allow_rc = rc;
		}
	}

	kfree(vf_pe_array);
	return rc;
}

static int pseries_notify_resume(struct eeh_dev *edev)
{
	if (!edev)
		return -EEXIST;

	if (rtas_token("ibm,open-sriov-allow-unfreeze") == RTAS_UNKNOWN_SERVICE)
		return -EINVAL;

	if (edev->pdev->is_physfn || edev->pdev->is_virtfn)
		return pseries_call_allow_unfreeze(edev);

	return 0;
}
#endif

static struct eeh_ops pseries_eeh_ops = {
	.name			= "pseries",
	.probe			= pseries_eeh_probe,
	.set_option		= pseries_eeh_set_option,
	.get_state		= pseries_eeh_get_state,
	.reset			= pseries_eeh_reset,
	.get_log		= pseries_eeh_get_log,
	.configure_bridge	= pseries_eeh_configure_bridge,
	.err_inject		= NULL,
	.read_config		= pseries_eeh_read_config,
	.write_config		= pseries_eeh_write_config,
	.next_error		= NULL,
	.restore_config		= NULL, /* NB: configure_bridge() does this */
#ifdef CONFIG_PCI_IOV
	.notify_resume		= pseries_notify_resume
#endif
};

/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * EEH initialization on pseries platform. This function should be
 * called before any EEH related functions.
 */
static int __init eeh_pseries_init(void)
{
	struct pci_controller *phb;
	struct pci_dn *pdn;
	int ret, config_addr;

	/* figure out EEH RTAS function call tokens */
	ibm_set_eeh_option		= rtas_token("ibm,set-eeh-option");
	ibm_set_slot_reset		= rtas_token("ibm,set-slot-reset");
	ibm_read_slot_reset_state2	= rtas_token("ibm,read-slot-reset-state2");
	ibm_read_slot_reset_state	= rtas_token("ibm,read-slot-reset-state");
	ibm_slot_error_detail		= rtas_token("ibm,slot-error-detail");
	ibm_get_config_addr_info2	= rtas_token("ibm,get-config-addr-info2");
	ibm_get_config_addr_info	= rtas_token("ibm,get-config-addr-info");
	ibm_configure_pe		= rtas_token("ibm,configure-pe");

	/*
	 * ibm,configure-pe and ibm,configure-bridge have the same semantics,
	 * however ibm,configure-pe can be faster. If we can't find
	 * ibm,configure-pe then fall back to using ibm,configure-bridge.
	 */
	if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE)
		ibm_configure_pe = rtas_token("ibm,configure-bridge");

	/*
	 * Necessary sanity check. We needn't check "get-config-addr-info"
	 * and its variant since old firmware probably supports the
	 * domain/bus/slot/function form of addresses for EEH RTAS
	 * operations.
	 */
	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE ||
	    ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE ||
	    (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
	     ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) ||
	    ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE ||
	    ibm_configure_pe == RTAS_UNKNOWN_SERVICE) {
		pr_info("EEH functionality not supported\n");
		return -EINVAL;
	}

	/* Initialize error log lock and size */
	spin_lock_init(&slot_errbuf_lock);
	eeh_error_buf_size = rtas_token("rtas-error-log-max");
	if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
		pr_info("%s: unknown EEH error log size\n",
			__func__);
		eeh_error_buf_size = 1024;
	} else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
		pr_info("%s: EEH error log size %d exceeds the maximal %d\n",
			__func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
	}

	/* Set EEH probe mode */
	eeh_add_flag(EEH_PROBE_MODE_DEVTREE | EEH_ENABLE_IO_FOR_LOG);

	/* Set EEH machine dependent code */
	ppc_md.pcibios_bus_add_device = pseries_pcibios_bus_add_device;

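	/*
	 * A kdump kernel (or a reset_devices boot) may inherit PHBs left
	 * frozen by the previous kernel, so reset and reconfigure each
	 * populated PHB before EEH is brought up.
	 */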
	if (is_kdump_kernel() || reset_devices) {
		pr_info("Issue PHB reset ...\n");
		list_for_each_entry(phb, &hose_list, list_node) {
			// Skip if the slot is empty
			if (list_empty(&PCI_DN(phb->dn)->child_list))
				continue;

			pdn = list_first_entry(&PCI_DN(phb->dn)->child_list, struct pci_dn, list);
			config_addr = pseries_eeh_get_pe_config_addr(pdn);

			/* invalid PE config addr */
			if (config_addr < 0)
				continue;

			pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_FUNDAMENTAL);
			pseries_eeh_phb_reset(phb, config_addr, EEH_RESET_DEACTIVATE);
			pseries_eeh_phb_configure_bridge(phb, config_addr);
		}
	}

	ret = eeh_init(&pseries_eeh_ops);
	if (!ret)
		pr_info("EEH: pSeries platform initialized\n");
	else
		pr_info("EEH: pSeries platform initialization failure (%d)\n",
			ret);
	return ret;
}
machine_arch_initcall(pseries, eeh_pseries_init);