// SPDX-License-Identifier: GPL-2.0
/*
 * PCIe AER software error injection support.
 *
 * Debugging PCIe AER code is quite difficult because it is hard to
 * trigger various real hardware errors. Software-based error
 * injection can fake almost all kinds of errors with the help of a
 * user space helper tool aer-inject, which can be obtained from:
 * https://www.kernel.org/pub/linux/utils/pci/aer-inject/
 *
 * Copyright 2009 Intel Corporation.
 *	Huang Ying <ying.huang@intel.com>
 */

#define dev_fmt(fmt) "aer_inject: " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/stddef.h>
#include <linux/device.h>

#include "portdrv.h"

/* Override the existing corrected and uncorrected error masks */
static bool aer_mask_override;
module_param(aer_mask_override, bool, 0);

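/*
 * Error injection request, as written by user space to the "aer_inject"
 * misc device (see aer_inject_write() below). The corrected and
 * uncorrected status bits and the TLP header log are simulated for the
 * device at domain:bus:dev.fn.
 */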
struct aer_error_inj {
	u8 bus;
	u8 dev;
	u8 fn;
	u32 uncor_status;
	u32 cor_status;
	u32 header_log0;
	u32 header_log1;
	u32 header_log2;
	u32 header_log3;
	u32 domain;
};

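/*
 * Simulated AER register state for one device, kept on the "einjected"
 * list. Reads and writes of the matching AER config registers are
 * redirected to these fields by aer_inj_read_config() and
 * aer_inj_write_config().
 */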
struct aer_error {
	struct list_head list;
	u32 domain;
	unsigned int bus;
	unsigned int devfn;
	int pos_cap_err;

	u32 uncor_status;
	u32 cor_status;
	u32 header_log0;
	u32 header_log1;
	u32 header_log2;
	u32 header_log3;
	u32 root_status;
	u32 source_id;
};

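/*
 * Original pci_ops of a bus whose ops have been replaced with
 * aer_inj_pci_ops, so accesses can be forwarded and the ops restored
 * on module exit.
 */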
struct pci_bus_ops {
	struct list_head list;
	struct pci_bus *bus;
	struct pci_ops *ops;
};

static LIST_HEAD(einjected);

static LIST_HEAD(pci_bus_ops_list);

/* Protect einjected and pci_bus_ops_list */
static DEFINE_SPINLOCK(inject_lock);

static void aer_error_init(struct aer_error *err, u32 domain,
			   unsigned int bus, unsigned int devfn,
			   int pos_cap_err)
{
	INIT_LIST_HEAD(&err->list);
	err->domain = domain;
	err->bus = bus;
	err->devfn = devfn;
	err->pos_cap_err = pos_cap_err;
}

/* inject_lock must be held before calling */
static struct aer_error *__find_aer_error(u32 domain, unsigned int bus,
					  unsigned int devfn)
{
	struct aer_error *err;

	list_for_each_entry(err, &einjected, list) {
		if (domain == err->domain &&
		    bus == err->bus &&
		    devfn == err->devfn)
			return err;
	}
	return NULL;
}

/* inject_lock must be held before calling */
static struct aer_error *__find_aer_error_by_dev(struct pci_dev *dev)
{
	int domain = pci_domain_nr(dev->bus);
	if (domain < 0)
		return NULL;
	return __find_aer_error(domain, dev->bus->number, dev->devfn);
}

/* inject_lock must be held before calling */
static struct pci_ops *__find_pci_bus_ops(struct pci_bus *bus)
{
	struct pci_bus_ops *bus_ops;

	list_for_each_entry(bus_ops, &pci_bus_ops_list, list) {
		if (bus_ops->bus == bus)
			return bus_ops->ops;
	}
	return NULL;
}

static struct pci_bus_ops *pci_bus_ops_pop(void)
{
	unsigned long flags;
	struct pci_bus_ops *bus_ops;

	spin_lock_irqsave(&inject_lock, flags);
	bus_ops = list_first_entry_or_null(&pci_bus_ops_list,
					   struct pci_bus_ops, list);
	if (bus_ops)
		list_del(&bus_ops->list);
	spin_unlock_irqrestore(&inject_lock, flags);
	return bus_ops;
}

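/*
 * Map a config space offset onto the corresponding field of the
 * simulated AER register block, or return NULL if the offset is not
 * simulated. *prw1cs is set when the register has "write 1 to clear"
 * (RW1CS) semantics.
 */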
static u32 *find_pci_config_dword(struct aer_error *err, int where,
				  int *prw1cs)
{
	int rw1cs = 0;
	u32 *target = NULL;

	if (err->pos_cap_err == -1)
		return NULL;

	switch (where - err->pos_cap_err) {
	case PCI_ERR_UNCOR_STATUS:
		target = &err->uncor_status;
		rw1cs = 1;
		break;
	case PCI_ERR_COR_STATUS:
		target = &err->cor_status;
		rw1cs = 1;
		break;
	case PCI_ERR_HEADER_LOG:
		target = &err->header_log0;
		break;
	case PCI_ERR_HEADER_LOG+4:
		target = &err->header_log1;
		break;
	case PCI_ERR_HEADER_LOG+8:
		target = &err->header_log2;
		break;
	case PCI_ERR_HEADER_LOG+12:
		target = &err->header_log3;
		break;
	case PCI_ERR_ROOT_STATUS:
		target = &err->root_status;
		rw1cs = 1;
		break;
	case PCI_ERR_ROOT_ERR_SRC:
		target = &err->source_id;
		break;
	}
	if (prw1cs)
		*prw1cs = rw1cs;
	return target;
}

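/*
 * Forward a config access to the bus's original pci_ops (saved on
 * pci_bus_ops_list) while the intercepting aer_inj_pci_ops are
 * installed.
 */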
static int aer_inj_read(struct pci_bus *bus, unsigned int devfn, int where,
			int size, u32 *val)
{
	struct pci_ops *ops, *my_ops;
	int rv;

	ops = __find_pci_bus_ops(bus);
	if (!ops)
		return -1;

	my_ops = bus->ops;
	bus->ops = ops;
	rv = ops->read(bus, devfn, where, size, val);
	bus->ops = my_ops;

	return rv;
}

static int aer_inj_write(struct pci_bus *bus, unsigned int devfn, int where,
			 int size, u32 val)
{
	struct pci_ops *ops, *my_ops;
	int rv;

	ops = __find_pci_bus_ops(bus);
	if (!ops)
		return -1;

	my_ops = bus->ops;
	bus->ops = ops;
	rv = ops->write(bus, devfn, where, size, val);
	bus->ops = my_ops;

	return rv;
}

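/*
 * Config read op installed on buses with injected errors: dword reads
 * of simulated AER registers are served from the matching struct
 * aer_error; everything else falls through to the original ops.
 */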
static int aer_inj_read_config(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	u32 *sim;
	struct aer_error *err;
	unsigned long flags;
	int domain;
	int rv;

	spin_lock_irqsave(&inject_lock, flags);
	if (size != sizeof(u32))
		goto out;
	domain = pci_domain_nr(bus);
	if (domain < 0)
		goto out;
	err = __find_aer_error(domain, bus->number, devfn);
	if (!err)
		goto out;

	sim = find_pci_config_dword(err, where, NULL);
	if (sim) {
		*val = *sim;
		spin_unlock_irqrestore(&inject_lock, flags);
		return 0;
	}
out:
	rv = aer_inj_read(bus, devfn, where, size, val);
	spin_unlock_irqrestore(&inject_lock, flags);
	return rv;
}

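/*
 * Config write op: dword writes to simulated AER registers update the
 * struct aer_error instead of the hardware. RW1CS registers are
 * emulated by toggling the written bits, which clears them for the
 * usual "write back the set status bits" pattern.
 */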
static int aer_inj_write_config(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	u32 *sim;
	struct aer_error *err;
	unsigned long flags;
	int rw1cs;
	int domain;
	int rv;

	spin_lock_irqsave(&inject_lock, flags);
	if (size != sizeof(u32))
		goto out;
	domain = pci_domain_nr(bus);
	if (domain < 0)
		goto out;
	err = __find_aer_error(domain, bus->number, devfn);
	if (!err)
		goto out;

	sim = find_pci_config_dword(err, where, &rw1cs);
	if (sim) {
		if (rw1cs)
			*sim ^= val;
		else
			*sim = val;
		spin_unlock_irqrestore(&inject_lock, flags);
		return 0;
	}
out:
	rv = aer_inj_write(bus, devfn, where, size, val);
	spin_unlock_irqrestore(&inject_lock, flags);
	return rv;
}

static struct pci_ops aer_inj_pci_ops = {
	.read = aer_inj_read_config,
	.write = aer_inj_write_config,
};

static void pci_bus_ops_init(struct pci_bus_ops *bus_ops,
			     struct pci_bus *bus,
			     struct pci_ops *ops)
{
	INIT_LIST_HEAD(&bus_ops->list);
	bus_ops->bus = bus;
	bus_ops->ops = ops;
}

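/*
 * Install aer_inj_pci_ops on a bus so config accesses can be
 * intercepted, saving the original ops on pci_bus_ops_list. Calling
 * this again for a bus that is already intercepted is a no-op.
 */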
static int pci_bus_set_aer_ops(struct pci_bus *bus)
{
	struct pci_ops *ops;
	struct pci_bus_ops *bus_ops;
	unsigned long flags;

	bus_ops = kmalloc(sizeof(*bus_ops), GFP_KERNEL);
	if (!bus_ops)
		return -ENOMEM;
	ops = pci_bus_set_ops(bus, &aer_inj_pci_ops);
	spin_lock_irqsave(&inject_lock, flags);
	if (ops == &aer_inj_pci_ops)
		goto out;
	pci_bus_ops_init(bus_ops, bus, ops);
	list_add(&bus_ops->list, &pci_bus_ops_list);
	bus_ops = NULL;
out:
	spin_unlock_irqrestore(&inject_lock, flags);
	kfree(bus_ops);
	return 0;
}

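/*
 * Carry out one injection request: record the requested error bits in
 * the simulated AER registers of the target device and of its Root
 * Port, install the intercepting config ops on both buses, and then
 * trigger the Root Port's AER service interrupt so the error is
 * handled as if the hardware had reported it.
 */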
static int aer_inject(struct aer_error_inj *einj)
{
	struct aer_error *err, *rperr;
	struct aer_error *err_alloc = NULL, *rperr_alloc = NULL;
	struct pci_dev *dev, *rpdev;
	struct pcie_device *edev;
	struct device *device;
	unsigned long flags;
	unsigned int devfn = PCI_DEVFN(einj->dev, einj->fn);
	int pos_cap_err, rp_pos_cap_err;
	u32 sever, cor_mask, uncor_mask, cor_mask_orig = 0, uncor_mask_orig = 0;
	int ret = 0;

	dev = pci_get_domain_bus_and_slot(einj->domain, einj->bus, devfn);
	if (!dev)
		return -ENODEV;
	rpdev = pcie_find_root_port(dev);
	if (!rpdev) {
		pci_err(dev, "Root port not found\n");
		ret = -ENODEV;
		goto out_put;
	}

	pos_cap_err = dev->aer_cap;
	if (!pos_cap_err) {
		pci_err(dev, "Device doesn't support AER\n");
		ret = -EPROTONOSUPPORT;
		goto out_put;
	}
	pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_SEVER, &sever);
	pci_read_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK, &cor_mask);
	pci_read_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
			      &uncor_mask);

	rp_pos_cap_err = rpdev->aer_cap;
	if (!rp_pos_cap_err) {
		pci_err(rpdev, "Root port doesn't support AER\n");
		ret = -EPROTONOSUPPORT;
		goto out_put;
	}

	err_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
	if (!err_alloc) {
		ret = -ENOMEM;
		goto out_put;
	}
	rperr_alloc = kzalloc(sizeof(struct aer_error), GFP_KERNEL);
	if (!rperr_alloc) {
		ret = -ENOMEM;
		goto out_put;
	}

	if (aer_mask_override) {
		/* Temporarily clear the masks so the injected errors are reported */
		cor_mask_orig = cor_mask;
		cor_mask &= !(einj->cor_status);
		pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
				       cor_mask);

		uncor_mask_orig = uncor_mask;
		uncor_mask &= !(einj->uncor_status);
		pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
				       uncor_mask);
	}

	spin_lock_irqsave(&inject_lock, flags);

	err = __find_aer_error_by_dev(dev);
	if (!err) {
		err = err_alloc;
		err_alloc = NULL;
		aer_error_init(err, einj->domain, einj->bus, devfn,
			       pos_cap_err);
		list_add(&err->list, &einjected);
	}
	err->uncor_status |= einj->uncor_status;
	err->cor_status |= einj->cor_status;
	err->header_log0 = einj->header_log0;
	err->header_log1 = einj->header_log1;
	err->header_log2 = einj->header_log2;
	err->header_log3 = einj->header_log3;

	if (!aer_mask_override && einj->cor_status &&
	    !(einj->cor_status & ~cor_mask)) {
		ret = -EINVAL;
		pci_warn(dev, "The correctable error(s) is masked by device\n");
		spin_unlock_irqrestore(&inject_lock, flags);
		goto out_put;
	}
	if (!aer_mask_override && einj->uncor_status &&
	    !(einj->uncor_status & ~uncor_mask)) {
		ret = -EINVAL;
		pci_warn(dev, "The uncorrectable error(s) is masked by device\n");
		spin_unlock_irqrestore(&inject_lock, flags);
		goto out_put;
	}

	/* Mirror the error into the Root Port's simulated Root Error Status */
	rperr = __find_aer_error_by_dev(rpdev);
	if (!rperr) {
		rperr = rperr_alloc;
		rperr_alloc = NULL;
		aer_error_init(rperr, pci_domain_nr(rpdev->bus),
			       rpdev->bus->number, rpdev->devfn,
			       rp_pos_cap_err);
		list_add(&rperr->list, &einjected);
	}
	if (einj->cor_status) {
		if (rperr->root_status & PCI_ERR_ROOT_COR_RCV)
			rperr->root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
		else
			rperr->root_status |= PCI_ERR_ROOT_COR_RCV;
		rperr->source_id &= 0xffff0000;
		rperr->source_id |= (einj->bus << 8) | devfn;
	}
	if (einj->uncor_status) {
		if (rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV)
			rperr->root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
		if (sever & einj->uncor_status) {
			rperr->root_status |= PCI_ERR_ROOT_FATAL_RCV;
			if (!(rperr->root_status & PCI_ERR_ROOT_UNCOR_RCV))
				rperr->root_status |= PCI_ERR_ROOT_FIRST_FATAL;
		} else
			rperr->root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
		rperr->root_status |= PCI_ERR_ROOT_UNCOR_RCV;
		rperr->source_id &= 0x0000ffff;
		rperr->source_id |= ((einj->bus << 8) | devfn) << 16;
	}
	spin_unlock_irqrestore(&inject_lock, flags);

	if (aer_mask_override) {
		pci_write_config_dword(dev, pos_cap_err + PCI_ERR_COR_MASK,
				       cor_mask_orig);
		pci_write_config_dword(dev, pos_cap_err + PCI_ERR_UNCOR_MASK,
				       uncor_mask_orig);
	}

	ret = pci_bus_set_aer_ops(dev->bus);
	if (ret)
		goto out_put;
	ret = pci_bus_set_aer_ops(rpdev->bus);
	if (ret)
		goto out_put;

	device = pcie_port_find_device(rpdev, PCIE_PORT_SERVICE_AER);
	if (device) {
		edev = to_pcie_device(device);
		if (!get_service_data(edev)) {
			pci_warn(edev->port, "AER service is not initialized\n");
			ret = -EPROTONOSUPPORT;
			goto out_put;
		}
		pci_info(edev->port, "Injecting errors %08x/%08x into device %s\n",
			 einj->cor_status, einj->uncor_status, pci_name(dev));
		ret = irq_inject_interrupt(edev->irq);
	} else {
		pci_err(rpdev, "AER device not found\n");
		ret = -ENODEV;
	}
out_put:
	kfree(err_alloc);
	kfree(rperr_alloc);
	pci_dev_put(dev);
	return ret;
}

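/*
 * write() handler for the "aer_inject" misc device. User space passes a
 * struct aer_error_inj; shorter records that omit the trailing domain
 * field are accepted, and the missing fields default to zero.
 */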
static ssize_t aer_inject_write(struct file *filp, const char __user *ubuf,
				size_t usize, loff_t *off)
{
	struct aer_error_inj einj;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (usize < offsetof(struct aer_error_inj, domain) ||
	    usize > sizeof(einj))
		return -EINVAL;

	memset(&einj, 0, sizeof(einj));
	if (copy_from_user(&einj, ubuf, usize))
		return -EFAULT;

	ret = aer_inject(&einj);
	return ret ? ret : usize;
}

static const struct file_operations aer_inject_fops = {
	.write = aer_inject_write,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice aer_inject_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "aer_inject",
	.fops = &aer_inject_fops,
};

static int __init aer_inject_init(void)
{
	return misc_register(&aer_inject_device);
}

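/*
 * On module unload, restore the original pci_ops of every bus that was
 * patched and free all simulated error records.
 */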
static void __exit aer_inject_exit(void)
{
	struct aer_error *err, *err_next;
	unsigned long flags;
	struct pci_bus_ops *bus_ops;

	misc_deregister(&aer_inject_device);

	while ((bus_ops = pci_bus_ops_pop())) {
		pci_bus_set_ops(bus_ops->bus, bus_ops->ops);
		kfree(bus_ops);
	}

	spin_lock_irqsave(&inject_lock, flags);
	list_for_each_entry_safe(err, err_next, &einjected, list) {
		list_del(&err->list);
		kfree(err);
	}
	spin_unlock_irqrestore(&inject_lock, flags);
}

module_init(aer_inject_init);
module_exit(aer_inject_exit);

MODULE_DESCRIPTION("PCIe AER software error injector");
MODULE_LICENSE("GPL");