// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@linux.intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel Henry <henry.mitchel@intel.com>
 */
16*4882a593Smuzhiyun
17*4882a593Smuzhiyun #include <linux/fpga-dfl.h>
18*4882a593Smuzhiyun #include <linux/uaccess.h>
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun #include "dfl-afu.h"
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun #define PORT_ERROR_MASK 0x8
23*4882a593Smuzhiyun #define PORT_ERROR 0x10
24*4882a593Smuzhiyun #define PORT_FIRST_ERROR 0x18
25*4882a593Smuzhiyun #define PORT_MALFORMED_REQ0 0x20
26*4882a593Smuzhiyun #define PORT_MALFORMED_REQ1 0x28
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #define ERROR_MASK GENMASK_ULL(63, 0)
29*4882a593Smuzhiyun
30*4882a593Smuzhiyun /* mask or unmask port errors by the error mask register. */
/*
 * Mask or unmask all port error sources via the error mask register.
 * Caller must hold pdata->lock (see afu_port_err_mask()).
 */
static void __afu_port_err_mask(struct device *dev, bool mask)
{
	void __iomem *base =
		dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);

	/* all-ones masks every error source; zero unmasks them all */
	writeq(mask ? ERROR_MASK : 0, base + PORT_ERROR_MASK);
}
39*4882a593Smuzhiyun
afu_port_err_mask(struct device * dev,bool mask)40*4882a593Smuzhiyun static void afu_port_err_mask(struct device *dev, bool mask)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun mutex_lock(&pdata->lock);
45*4882a593Smuzhiyun __afu_port_err_mask(dev, mask);
46*4882a593Smuzhiyun mutex_unlock(&pdata->lock);
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun /* clear port errors. */
/*
 * afu_port_err_clear - clear port errors
 * @dev: the port device
 * @err: caller's snapshot of PORT_ERROR; must match the live register
 *       value or nothing is cleared (guards against clearing errors
 *       that arrived after the caller read the register)
 *
 * Return: 0 on success, -EBUSY if the device is in AP6 power state,
 * the error code of __afu_port_disable() if the port could not be
 * halted, or -EINVAL if @err does not match the current errors.
 */
static int afu_port_err_clear(struct device *dev, u64 err)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	struct platform_device *pdev = to_platform_device(dev);
	void __iomem *base_err, *base_hdr;
	int ret = -EBUSY;
	u64 v;

	base_err = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
	base_hdr = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);

	/*
	 * clear Port Errors
	 *
	 * - Check for AP6 State
	 * - Halt Port by keeping Port in reset
	 * - Set PORT Error mask to all 1 to mask errors
	 * - Clear all errors
	 * - Set Port mask to all 0 to enable errors
	 * - All errors start capturing new errors
	 * - Enable Port by pulling the port out of reset
	 */

	/* if device is still in AP6 power state, can not clear any error. */
	v = readq(base_hdr + PORT_HDR_STS);
	if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
		dev_err(dev, "Could not clear errors, device in AP6 state.\n");
		goto done;
	}

	/* Halt Port by keeping Port in reset */
	ret = __afu_port_disable(pdev);
	if (ret)
		goto done;

	/* Mask all errors */
	__afu_port_err_mask(dev, true);

	/* Clear errors if err input matches with current port errors.*/
	v = readq(base_err + PORT_ERROR);

	if (v == err) {
		/*
		 * the read value is written back, presumably these are
		 * write-1-to-clear registers — TODO confirm against the
		 * port error feature spec
		 */
		writeq(v, base_err + PORT_ERROR);

		v = readq(base_err + PORT_FIRST_ERROR);
		writeq(v, base_err + PORT_FIRST_ERROR);
	} else {
		ret = -EINVAL;
	}

	/* Clear mask */
	__afu_port_err_mask(dev, false);

	/* Enable the Port by clear the reset */
	__afu_port_enable(pdev);

done:
	mutex_unlock(&pdata->lock);
	return ret;
}
112*4882a593Smuzhiyun
errors_show(struct device * dev,struct device_attribute * attr,char * buf)113*4882a593Smuzhiyun static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
114*4882a593Smuzhiyun char *buf)
115*4882a593Smuzhiyun {
116*4882a593Smuzhiyun struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
117*4882a593Smuzhiyun void __iomem *base;
118*4882a593Smuzhiyun u64 error;
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
121*4882a593Smuzhiyun
122*4882a593Smuzhiyun mutex_lock(&pdata->lock);
123*4882a593Smuzhiyun error = readq(base + PORT_ERROR);
124*4882a593Smuzhiyun mutex_unlock(&pdata->lock);
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun return sprintf(buf, "0x%llx\n", (unsigned long long)error);
127*4882a593Smuzhiyun }
128*4882a593Smuzhiyun
errors_store(struct device * dev,struct device_attribute * attr,const char * buff,size_t count)129*4882a593Smuzhiyun static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
130*4882a593Smuzhiyun const char *buff, size_t count)
131*4882a593Smuzhiyun {
132*4882a593Smuzhiyun u64 value;
133*4882a593Smuzhiyun int ret;
134*4882a593Smuzhiyun
135*4882a593Smuzhiyun if (kstrtou64(buff, 0, &value))
136*4882a593Smuzhiyun return -EINVAL;
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun ret = afu_port_err_clear(dev, value);
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun return ret ? ret : count;
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun static DEVICE_ATTR_RW(errors);
143*4882a593Smuzhiyun
first_error_show(struct device * dev,struct device_attribute * attr,char * buf)144*4882a593Smuzhiyun static ssize_t first_error_show(struct device *dev,
145*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
146*4882a593Smuzhiyun {
147*4882a593Smuzhiyun struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
148*4882a593Smuzhiyun void __iomem *base;
149*4882a593Smuzhiyun u64 error;
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun mutex_lock(&pdata->lock);
154*4882a593Smuzhiyun error = readq(base + PORT_FIRST_ERROR);
155*4882a593Smuzhiyun mutex_unlock(&pdata->lock);
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun return sprintf(buf, "0x%llx\n", (unsigned long long)error);
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun static DEVICE_ATTR_RO(first_error);
160*4882a593Smuzhiyun
first_malformed_req_show(struct device * dev,struct device_attribute * attr,char * buf)161*4882a593Smuzhiyun static ssize_t first_malformed_req_show(struct device *dev,
162*4882a593Smuzhiyun struct device_attribute *attr,
163*4882a593Smuzhiyun char *buf)
164*4882a593Smuzhiyun {
165*4882a593Smuzhiyun struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
166*4882a593Smuzhiyun void __iomem *base;
167*4882a593Smuzhiyun u64 req0, req1;
168*4882a593Smuzhiyun
169*4882a593Smuzhiyun base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_ERROR);
170*4882a593Smuzhiyun
171*4882a593Smuzhiyun mutex_lock(&pdata->lock);
172*4882a593Smuzhiyun req0 = readq(base + PORT_MALFORMED_REQ0);
173*4882a593Smuzhiyun req1 = readq(base + PORT_MALFORMED_REQ1);
174*4882a593Smuzhiyun mutex_unlock(&pdata->lock);
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun return sprintf(buf, "0x%016llx%016llx\n",
177*4882a593Smuzhiyun (unsigned long long)req1, (unsigned long long)req0);
178*4882a593Smuzhiyun }
179*4882a593Smuzhiyun static DEVICE_ATTR_RO(first_malformed_req);
180*4882a593Smuzhiyun
/* sysfs attributes published by this sub driver (NULL-terminated). */
static struct attribute *port_err_attrs[] = {
	&dev_attr_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_first_malformed_req.attr,
	NULL,
};
187*4882a593Smuzhiyun
port_err_attrs_visible(struct kobject * kobj,struct attribute * attr,int n)188*4882a593Smuzhiyun static umode_t port_err_attrs_visible(struct kobject *kobj,
189*4882a593Smuzhiyun struct attribute *attr, int n)
190*4882a593Smuzhiyun {
191*4882a593Smuzhiyun struct device *dev = kobj_to_dev(kobj);
192*4882a593Smuzhiyun
193*4882a593Smuzhiyun /*
194*4882a593Smuzhiyun * sysfs entries are visible only if related private feature is
195*4882a593Smuzhiyun * enumerated.
196*4882a593Smuzhiyun */
197*4882a593Smuzhiyun if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_ERROR))
198*4882a593Smuzhiyun return 0;
199*4882a593Smuzhiyun
200*4882a593Smuzhiyun return attr->mode;
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun
/*
 * "errors" sysfs group for the AFU port. Attribute visibility is gated
 * by port_err_attrs_visible() on whether the error feature exists.
 */
const struct attribute_group port_err_group = {
	.name = "errors",
	.attrs = port_err_attrs,
	.is_visible = port_err_attrs_visible,
};
208*4882a593Smuzhiyun
port_err_init(struct platform_device * pdev,struct dfl_feature * feature)209*4882a593Smuzhiyun static int port_err_init(struct platform_device *pdev,
210*4882a593Smuzhiyun struct dfl_feature *feature)
211*4882a593Smuzhiyun {
212*4882a593Smuzhiyun afu_port_err_mask(&pdev->dev, false);
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun return 0;
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun
port_err_uinit(struct platform_device * pdev,struct dfl_feature * feature)217*4882a593Smuzhiyun static void port_err_uinit(struct platform_device *pdev,
218*4882a593Smuzhiyun struct dfl_feature *feature)
219*4882a593Smuzhiyun {
220*4882a593Smuzhiyun afu_port_err_mask(&pdev->dev, true);
221*4882a593Smuzhiyun }
222*4882a593Smuzhiyun
223*4882a593Smuzhiyun static long
port_err_ioctl(struct platform_device * pdev,struct dfl_feature * feature,unsigned int cmd,unsigned long arg)224*4882a593Smuzhiyun port_err_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
225*4882a593Smuzhiyun unsigned int cmd, unsigned long arg)
226*4882a593Smuzhiyun {
227*4882a593Smuzhiyun switch (cmd) {
228*4882a593Smuzhiyun case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
229*4882a593Smuzhiyun return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
230*4882a593Smuzhiyun case DFL_FPGA_PORT_ERR_SET_IRQ:
231*4882a593Smuzhiyun return dfl_feature_ioctl_set_irq(pdev, feature, arg);
232*4882a593Smuzhiyun default:
233*4882a593Smuzhiyun dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
234*4882a593Smuzhiyun return -ENODEV;
235*4882a593Smuzhiyun }
236*4882a593Smuzhiyun }
237*4882a593Smuzhiyun
/* DFL private feature id(s) this sub driver binds to; zero-terminated. */
const struct dfl_feature_id port_err_id_table[] = {
	{.id = PORT_FEATURE_ID_ERROR,},
	{0,}
};
242*4882a593Smuzhiyun
/* Sub-feature operations for the port error feature. */
const struct dfl_feature_ops port_err_ops = {
	.init = port_err_init,
	.uinit = port_err_uinit,
	.ioctl = port_err_ioctl,
};
248