xref: /OK3568_Linux_fs/kernel/drivers/fpga/dfl-fme-error.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine Error Management
 *
 * Copyright 2019 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <luwei.kang@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Wu Hao <hao.wu@intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Mitchel, Henry <henry.mitchel@intel.com>
 */

#include <linux/fpga-dfl.h>
#include <linux/uaccess.h>

#include "dfl.h"
#include "dfl-fme.h"
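
/*
 * Register map of the FME global error reporting private feature. All
 * offsets below are relative to the feature MMIO base returned by
 * dfl_get_feature_ioaddr_by_id(). Inferred from the handlers in this
 * file rather than from a datasheet: the *_ERROR status registers are
 * cleared by writing the read value back (write-1-to-clear), and the
 * matching *_MASK registers suppress reporting while set.
 */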
#define FME_ERROR_MASK		0x8
#define FME_ERROR		0x10
#define MBP_ERROR		BIT_ULL(6)
#define PCIE0_ERROR_MASK	0x18
#define PCIE0_ERROR		0x20
#define PCIE1_ERROR_MASK	0x28
#define PCIE1_ERROR		0x30
#define FME_FIRST_ERROR		0x38
#define FME_NEXT_ERROR		0x40
#define RAS_NONFAT_ERROR_MASK	0x48
#define RAS_NONFAT_ERROR	0x50
#define RAS_CATFAT_ERROR_MASK	0x58
#define RAS_CATFAT_ERROR	0x60
#define RAS_ERROR_INJECT	0x68
#define INJECT_ERROR_MASK	GENMASK_ULL(2, 0)

#define ERROR_MASK		GENMASK_ULL(63, 0)

static ssize_t pcie0_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE0_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t pcie0_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	/* Mask all reporting while the clear is in flight. */
	writeq(GENMASK_ULL(63, 0), base + PCIE0_ERROR_MASK);

	/* Clear only if the register still holds the value the user saw. */
	v = readq(base + PCIE0_ERROR);
	if (val == v)
		writeq(v, base + PCIE0_ERROR);
	else
		ret = -EINVAL;

	/* Unmask again. */
	writeq(0ULL, base + PCIE0_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie0_errors);
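
/*
 * Usage sketch for the attribute pair above; the device name "dfl-fme.0"
 * is an assumption that depends on enumeration order:
 *
 *   # cat /sys/bus/platform/devices/dfl-fme.0/errors/pcie0_errors
 *   0x10
 *   # echo 0x10 > /sys/bus/platform/devices/dfl-fme.0/errors/pcie0_errors
 *
 * The write only clears the bits if the value still matches the live
 * register; a stale value (i.e. new errors landed in between) is
 * rejected with -EINVAL. pcie1_errors below follows the same protocol.
 */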

static ssize_t pcie1_errors_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + PCIE1_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t pcie1_errors_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	int ret = 0;
	u64 v, val;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + PCIE1_ERROR_MASK);

	v = readq(base + PCIE1_ERROR);
	if (val == v)
		writeq(v, base + PCIE1_ERROR);
	else
		ret = -EINVAL;

	writeq(0ULL, base + PCIE1_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(pcie1_errors);

static ssize_t nonfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_NONFAT_ERROR));
}
static DEVICE_ATTR_RO(nonfatal_errors);

static ssize_t catfatal_errors_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)readq(base + RAS_CATFAT_ERROR));
}
static DEVICE_ATTR_RO(catfatal_errors);

static ssize_t inject_errors_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)FIELD_GET(INJECT_ERROR_MASK, v));
}

static ssize_t inject_errors_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u8 inject_error;
	u64 v;

	if (kstrtou8(buf, 0, &inject_error))
		return -EINVAL;

	if (inject_error & ~INJECT_ERROR_MASK)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	v = readq(base + RAS_ERROR_INJECT);
	v &= ~INJECT_ERROR_MASK;
	v |= FIELD_PREP(INJECT_ERROR_MASK, inject_error);
	writeq(v, base + RAS_ERROR_INJECT);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(inject_errors);
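
/*
 * inject_errors only accepts values that fit INJECT_ERROR_MASK (bits 2:0),
 * e.g. "echo 0x1 > inject_errors"; anything with higher bits set fails
 * with -EINVAL. Which bit triggers which error class is hardware-defined
 * and not spelled out in this file.
 */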

static ssize_t fme_errors_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}

static ssize_t fme_errors_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v, val;
	int ret = 0;

	if (kstrtou64(buf, 0, &val))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	writeq(GENMASK_ULL(63, 0), base + FME_ERROR_MASK);

	v = readq(base + FME_ERROR);
	if (val == v)
		writeq(v, base + FME_ERROR);
	else
		ret = -EINVAL;

	/* Workaround: disable MBP_ERROR if feature revision is 0 */
	writeq(dfl_feature_revision(base) ? 0ULL : MBP_ERROR,
	       base + FME_ERROR_MASK);
	mutex_unlock(&pdata->lock);
	return ret ? ret : count;
}
static DEVICE_ATTR_RW(fme_errors);

static ssize_t first_error_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_FIRST_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(first_error);

static ssize_t next_error_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 value;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);
	value = readq(base + FME_NEXT_ERROR);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)value);
}
static DEVICE_ATTR_RO(next_error);

static struct attribute *fme_global_err_attrs[] = {
	&dev_attr_pcie0_errors.attr,
	&dev_attr_pcie1_errors.attr,
	&dev_attr_nonfatal_errors.attr,
	&dev_attr_catfatal_errors.attr,
	&dev_attr_inject_errors.attr,
	&dev_attr_fme_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_next_error.attr,
	NULL,
};

static umode_t fme_global_err_attrs_visible(struct kobject *kobj,
					    struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if the related private feature
	 * is enumerated.
	 */
	if (!dfl_get_feature_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR))
		return 0;

	return attr->mode;
}

const struct attribute_group fme_global_err_group = {
	.name       = "errors",
	.attrs      = fme_global_err_attrs,
	.is_visible = fme_global_err_attrs_visible,
};
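
/*
 * Because of .is_visible above, the whole "errors" sysfs group reads as
 * empty when the global error private feature was not enumerated for
 * this FME, rather than exposing attributes whose handlers would poke a
 * non-existent MMIO region.
 */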

static void fme_err_mask(struct device *dev, bool mask)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, FME_FEATURE_ID_GLOBAL_ERR);

	mutex_lock(&pdata->lock);

	/* Workaround: keep MBP_ERROR always masked if revision is 0 */
	if (dfl_feature_revision(base))
		writeq(mask ? ERROR_MASK : 0, base + FME_ERROR_MASK);
	else
		writeq(mask ? ERROR_MASK : MBP_ERROR, base + FME_ERROR_MASK);

	writeq(mask ? ERROR_MASK : 0, base + PCIE0_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + PCIE1_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_NONFAT_ERROR_MASK);
	writeq(mask ? ERROR_MASK : 0, base + RAS_CATFAT_ERROR_MASK);

	mutex_unlock(&pdata->lock);
}
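
/*
 * The init/uinit pair below unmasks all error reporting when the feature
 * is brought up and masks it again on teardown, with the revision-0
 * MBP_ERROR quirk handled centrally in fme_err_mask().
 */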

static int fme_global_err_init(struct platform_device *pdev,
			       struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, false);

	return 0;
}

static void fme_global_err_uinit(struct platform_device *pdev,
				 struct dfl_feature *feature)
{
	fme_err_mask(&pdev->dev, true);
}

static long
fme_global_error_ioctl(struct platform_device *pdev,
		       struct dfl_feature *feature,
		       unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DFL_FPGA_FME_ERR_GET_IRQ_NUM:
		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
	case DFL_FPGA_FME_ERR_SET_IRQ:
		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled\n", cmd);
		return -ENODEV;
	}
}
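
/*
 * Minimal userspace sketch for the two ioctls above, kept in a comment so
 * this file builds unchanged. It assumes the DFL uapi in
 * <linux/fpga-dfl.h> (struct dfl_fpga_irq_set with start/count/evtfds)
 * and an FME char device named /dev/dfl-fme.0, which depends on
 * enumeration order:
 *
 *	int fd = open("/dev/dfl-fme.0", O_RDWR);
 *	__u32 num_irqs = 0;
 *
 *	ioctl(fd, DFL_FPGA_FME_ERR_GET_IRQ_NUM, &num_irqs);
 *
 *	// Bind an eventfd to error irq 0, then poll()/read() it for events.
 *	struct dfl_fpga_irq_set *irqs = malloc(sizeof(*irqs) + sizeof(__s32));
 *
 *	irqs->start = 0;
 *	irqs->count = 1;
 *	irqs->evtfds[0] = eventfd(0, 0);
 *	ioctl(fd, DFL_FPGA_FME_ERR_SET_IRQ, irqs);
 */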

const struct dfl_feature_id fme_global_err_id_table[] = {
	{.id = FME_FEATURE_ID_GLOBAL_ERR,},
	{0,}
};

const struct dfl_feature_ops fme_global_err_ops = {
	.init = fme_global_err_init,
	.uinit = fme_global_err_uinit,
	.ioctl = fme_global_error_ioctl,
};
378