/*
 * Marvell MV64x60 Memory Controller kernel module for PPC platforms
 *
 * Author: Dave Jiang <djiang@mvista.com>
 *
 * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/edac.h>
#include <linux/gfp.h>

#include "edac_module.h"
#include "mv64x60_edac.h"

static const char *mv64x60_ctl_name = "MV64x60";
static int edac_dev_idx;
static int edac_pci_idx;
static int edac_mc_idx;

/*********************** PCI err device **********************************/
#ifdef CONFIG_PCI
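/*
 * Check the PCI error cause register; if any error bits are set, dump the
 * latched address/attribute/command registers, clear the cause bits, and
 * report the event to the EDAC core as a parity (PE) or non-parity (NPE)
 * error depending on MV64X60_PCI_PE_MASK.
 */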
static void mv64x60_pci_check(struct edac_pci_ctl_info *pci)
{
	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
	u32 cause;

	cause = readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
	if (!cause)
		return;

	printk(KERN_ERR "Error in PCI %d Interface\n", pdata->pci_hose);
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ADDR_HI));
	printk(KERN_ERR "Attribute: 0x%08x\n",
	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_ATTR));
	printk(KERN_ERR "Command: 0x%08x\n",
	       readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CMD));
	writel(~cause, pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);

	if (cause & MV64X60_PCI_PE_MASK)
		edac_pci_handle_pe(pci, pci->ctl_name);
	else
		edac_pci_handle_npe(pci, pci->ctl_name);
}

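/*
 * The interrupt handlers in this driver share a common pattern: read the
 * cause register, return IRQ_NONE if no error bit is set (the interrupt was
 * not ours), and otherwise delegate to the corresponding *_check() routine,
 * which also clears the cause register and thus de-asserts the interrupt.
 */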
static irqreturn_t mv64x60_pci_isr(int irq, void *dev_id)
{
	struct edac_pci_ctl_info *pci = dev_id;
	struct mv64x60_pci_pdata *pdata = pci->pvt_info;
	u32 val;

	val = readl(pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
	if (!val)
		return IRQ_NONE;

	mv64x60_pci_check(pci);

	return IRQ_HANDLED;
}

/*
 * Bit 0 of MV64x60_PCIx_ERR_MASK does not exist on the 64360 and because of
 * errata FEr-#11 and FEr-#16 for the 64460, it should be 0 on that chip as
 * well.  IOW, don't set bit 0.
 */

/* Erratum FEr PCI-#16: clear bit 0 of PCI SERRn Mask reg. */
static int __init mv64x60_pci_fixup(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *pci_serr;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "PCI err regs\n", __func__);
		return -ENOENT;
	}

	pci_serr = ioremap(r->start, resource_size(r));
	if (!pci_serr)
		return -ENOMEM;

	writel(readl(pci_serr) & ~0x1, pci_serr);
	iounmap(pci_serr);

	return 0;
}

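/*
 * Probe sequence for the PCI error device: open a devres group so partial
 * allocations can be rolled back, allocate the edac_pci control structure,
 * map the error register block, apply the SERRn erratum fixup, clear and
 * then unmask the error interrupts, register with the EDAC core, and
 * finally hook the error interrupt when running in interrupt mode.
 */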
static int mv64x60_pci_err_probe(struct platform_device *pdev)
{
	struct edac_pci_ctl_info *pci;
	struct mv64x60_pci_pdata *pdata;
	struct resource *r;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_pci_err_probe, GFP_KERNEL))
		return -ENOMEM;

	pci = edac_pci_alloc_ctl_info(sizeof(*pdata), "mv64x60_pci_err");
	if (!pci)
		return -ENOMEM;

	pdata = pci->pvt_info;

	pdata->pci_hose = pdev->id;
	pdata->name = "mv64x60_pci_err";
	platform_set_drvdata(pdev, pci);
	pci->dev = &pdev->dev;
	pci->dev_name = dev_name(&pdev->dev);
	pci->mod_name = EDAC_MOD_STR;
	pci->ctl_name = pdata->name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		pci->edac_check = mv64x60_pci_check;

	pdata->edac_idx = edac_pci_idx++;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "PCI err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->pci_vbase = devm_ioremap(&pdev->dev,
					r->start,
					resource_size(r));
	if (!pdata->pci_vbase) {
		printk(KERN_ERR "%s: Unable to setup PCI err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	res = mv64x60_pci_fixup(pdev);
	if (res < 0) {
		printk(KERN_ERR "%s: PCI fixup failed\n", __func__);
		goto err;
	}

	writel(0, pdata->pci_vbase + MV64X60_PCI_ERROR_CAUSE);
	writel(0, pdata->pci_vbase + MV64X60_PCI_ERROR_MASK);
	writel(MV64X60_PCIx_ERR_MASK_VAL,
		  pdata->pci_vbase + MV64X60_PCI_ERROR_MASK);

	if (edac_pci_add_device(pci, pdata->edac_idx) > 0) {
		edac_dbg(3, "failed edac_pci_add_device()\n");
		res = -ENODEV;
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_pci_isr,
				       0,
				       "[EDAC] PCI err",
				       pci);
		if (res < 0) {
			printk(KERN_ERR "%s: Unable to request irq %d for "
			       "MV64x60 PCI ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}
		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for PCI Err\n",
		       pdata->irq);
	}

	devres_remove_group(&pdev->dev, mv64x60_pci_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_pci_del_device(&pdev->dev);
err:
	edac_pci_free_ctl_info(pci);
	devres_release_group(&pdev->dev, mv64x60_pci_err_probe);
	return res;
}

static int mv64x60_pci_err_remove(struct platform_device *pdev)
{
	struct edac_pci_ctl_info *pci = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_pci_del_device(&pdev->dev);

	edac_pci_free_ctl_info(pci);

	return 0;
}

static struct platform_driver mv64x60_pci_err_driver = {
	.probe = mv64x60_pci_err_probe,
	.remove = mv64x60_pci_err_remove,
	.driver = {
		   .name = "mv64x60_pci_err",
	}
};

#endif /* CONFIG_PCI */

/*********************** SRAM err device **********************************/
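/*
 * Check the internal SRAM error cause register; on error, dump the latched
 * address/data/parity registers, clear the cause, and report the event to
 * the EDAC device core as an uncorrectable error.
 */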
static void mv64x60_sram_check(struct edac_device_ctl_info *edac_dev)
{
	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = readl(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
	if (!cause)
		return;

	printk(KERN_ERR "Error in internal SRAM\n");
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_ADDR_HI));
	printk(KERN_ERR "Data Low: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_LO));
	printk(KERN_ERR "Data High: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_DATA_HI));
	printk(KERN_ERR "Parity: 0x%08x\n",
	       readl(pdata->sram_vbase + MV64X60_SRAM_ERR_PARITY));
	writel(0, pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);

	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}

static irqreturn_t mv64x60_sram_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mv64x60_sram_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = readl(pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);
	if (!cause)
		return IRQ_NONE;

	mv64x60_sram_check(edac_dev);

	return IRQ_HANDLED;
}

static int mv64x60_sram_err_probe(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev;
	struct mv64x60_sram_pdata *pdata;
	struct resource *r;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_sram_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					      "sram", 1, NULL, 0, 0, NULL, 0,
					      edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mv64x60_sram_err";
	edac_dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, edac_dev);
	edac_dev->dev_name = dev_name(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "SRAM err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->sram_vbase = devm_ioremap(&pdev->dev,
					 r->start,
					 resource_size(r));
	if (!pdata->sram_vbase) {
		printk(KERN_ERR "%s: Unable to setup SRAM err regs\n",
		       __func__);
		res = -ENOMEM;
		goto err;
	}

	/* setup SRAM err registers */
	writel(0, pdata->sram_vbase + MV64X60_SRAM_ERR_CAUSE);

	edac_dev->mod_name = EDAC_MOD_STR;
	edac_dev->ctl_name = pdata->name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mv64x60_sram_check;

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		edac_dbg(3, "failed edac_device_add_device()\n");
		res = -ENODEV;
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_sram_isr,
				       0,
				       "[EDAC] SRAM err",
				       edac_dev);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for "
			       "MV64x60 SRAM ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for SRAM Err\n",
		       pdata->irq);
	}

	devres_remove_group(&pdev->dev, mv64x60_sram_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_device_del_device(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_sram_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}

static int mv64x60_sram_err_remove(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(edac_dev);

	return 0;
}

static struct platform_driver mv64x60_sram_err_driver = {
	.probe = mv64x60_sram_err_probe,
	.remove = mv64x60_sram_err_remove,
	.driver = {
		   .name = "mv64x60_sram_err",
	}
};

/*********************** CPU err device **********************************/
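/*
 * Check the CPU interface error cause register (masked by
 * MV64x60_CPU_CAUSE_MASK); on error, dump the latched address/data/parity
 * registers, clear the cause, and report the event to the EDAC device core
 * as an uncorrectable error.
 */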
static void mv64x60_cpu_check(struct edac_device_ctl_info *edac_dev)
{
	struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
	    MV64x60_CPU_CAUSE_MASK;
	if (!cause)
		return;

	printk(KERN_ERR "Error on CPU interface\n");
	printk(KERN_ERR "Cause register: 0x%08x\n", cause);
	printk(KERN_ERR "Address Low: 0x%08x\n",
	       readl(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_LO));
	printk(KERN_ERR "Address High: 0x%08x\n",
	       readl(pdata->cpu_vbase[0] + MV64x60_CPU_ERR_ADDR_HI));
	printk(KERN_ERR "Data Low: 0x%08x\n",
	       readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_LO));
	printk(KERN_ERR "Data High: 0x%08x\n",
	       readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_DATA_HI));
	printk(KERN_ERR "Parity: 0x%08x\n",
	       readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_PARITY));
	writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE);

	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
}

static irqreturn_t mv64x60_cpu_isr(int irq, void *dev_id)
{
	struct edac_device_ctl_info *edac_dev = dev_id;
	struct mv64x60_cpu_pdata *pdata = edac_dev->pvt_info;
	u32 cause;

	cause = readl(pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE) &
	    MV64x60_CPU_CAUSE_MASK;
	if (!cause)
		return IRQ_NONE;

	mv64x60_cpu_check(edac_dev);

	return IRQ_HANDLED;
}

static int mv64x60_cpu_err_probe(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev;
	struct resource *r;
	struct mv64x60_cpu_pdata *pdata;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_cpu_err_probe, GFP_KERNEL))
		return -ENOMEM;

	edac_dev = edac_device_alloc_ctl_info(sizeof(*pdata),
					      "cpu", 1, NULL, 0, 0, NULL, 0,
					      edac_dev_idx);
	if (!edac_dev) {
		devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
		return -ENOMEM;
	}

	pdata = edac_dev->pvt_info;
	pdata->name = "mv64x60_cpu_err";
	edac_dev->dev = &pdev->dev;
	platform_set_drvdata(pdev, edac_dev);
	edac_dev->dev_name = dev_name(&pdev->dev);

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "CPU err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->cpu_vbase[0] = devm_ioremap(&pdev->dev,
					   r->start,
					   resource_size(r));
	if (!pdata->cpu_vbase[0]) {
		printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "CPU err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->cpu_vbase[1] = devm_ioremap(&pdev->dev,
					   r->start,
					   resource_size(r));
	if (!pdata->cpu_vbase[1]) {
		printk(KERN_ERR "%s: Unable to setup CPU err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	/* setup CPU err registers */
	writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_CAUSE);
	writel(0, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK);
	writel(0x000000ff, pdata->cpu_vbase[1] + MV64x60_CPU_ERR_MASK);

	edac_dev->mod_name = EDAC_MOD_STR;
	edac_dev->ctl_name = pdata->name;
	if (edac_op_state == EDAC_OPSTATE_POLL)
		edac_dev->edac_check = mv64x60_cpu_check;

	pdata->edac_idx = edac_dev_idx++;

	if (edac_device_add_device(edac_dev) > 0) {
		edac_dbg(3, "failed edac_device_add_device()\n");
		res = -ENODEV;
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_cpu_isr,
				       0,
				       "[EDAC] CPU err",
				       edac_dev);
		if (res < 0) {
			printk(KERN_ERR
			       "%s: Unable to request irq %d for MV64x60 "
			       "CPU ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR
		       " acquired irq %d for CPU Err\n", pdata->irq);
	}

	devres_remove_group(&pdev->dev, mv64x60_cpu_err_probe);

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_device_del_device(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_cpu_err_probe);
	edac_device_free_ctl_info(edac_dev);
	return res;
}

static int mv64x60_cpu_err_remove(struct platform_device *pdev)
{
	struct edac_device_ctl_info *edac_dev = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_device_del_device(&pdev->dev);
	edac_device_free_ctl_info(edac_dev);
	return 0;
}

static struct platform_driver mv64x60_cpu_err_driver = {
	.probe = mv64x60_cpu_err_probe,
	.remove = mv64x60_cpu_err_remove,
	.driver = {
		   .name = "mv64x60_cpu_err",
	}
};

/*********************** DRAM err device **********************************/

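/*
 * Check the SDRAM ECC error address register.  A non-zero value means an
 * ECC event was latched: the syndrome is the XOR of the received and
 * calculated ECC values, and bit 0 of the address register distinguishes a
 * single-bit (correctable) error from a double-bit (uncorrectable) one.
 * Writing 0 back to the register clears the error (and the interrupt).
 */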
static void mv64x60_mc_check(struct mem_ctl_info *mci)
{
	struct mv64x60_mc_pdata *pdata = mci->pvt_info;
	u32 reg;
	u32 err_addr;
	u32 sdram_ecc;
	u32 comp_ecc;
	u32 syndrome;

	reg = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
	if (!reg)
		return;

	err_addr = reg & ~0x3;
	sdram_ecc = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_RCVD);
	comp_ecc = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CALC);
	syndrome = sdram_ecc ^ comp_ecc;

	/* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
	if (!(reg & 0x1))
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & PAGE_MASK, syndrome,
				     0, 0, -1,
				     mci->ctl_name, "");
	else	/* 2 bit error, UE */
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
				     err_addr >> PAGE_SHIFT,
				     err_addr & PAGE_MASK, 0,
				     0, 0, -1,
				     mci->ctl_name, "");

	/* clear the error */
	writel(0, pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
}

static irqreturn_t mv64x60_mc_isr(int irq, void *dev_id)
{
	struct mem_ctl_info *mci = dev_id;
	struct mv64x60_mc_pdata *pdata = mci->pvt_info;
	u32 reg;

	reg = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
	if (!reg)
		return IRQ_NONE;

	/* writing 0's to the ECC err addr in check function clears irq */
	mv64x60_mc_check(mci);

	return IRQ_HANDLED;
}

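/*
 * Derive the total memory size from the first "memory" node in the device
 * tree: the second cell of its "reg" property is taken as the size.  This
 * assumes a single, contiguous memory node as found on the boards this
 * driver supports.
 */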
static void get_total_mem(struct mv64x60_mc_pdata *pdata)
{
	struct device_node *np = NULL;
	const unsigned int *reg;

	np = of_find_node_by_type(NULL, "memory");
	if (!np)
		return;

	reg = of_get_property(np, "reg", NULL);
	if (reg)
		pdata->total_mem = reg[1];

	of_node_put(np);
}

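/*
 * Populate the single csrow/channel this controller exposes: the size comes
 * from the device tree, the grain is set to 8 bytes, the memory type
 * (registered or unbuffered DDR) is read from the SDRAM config register,
 * and the device width is decoded from bits [21:20] of that register.
 */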
static void mv64x60_init_csrows(struct mem_ctl_info *mci,
				struct mv64x60_mc_pdata *pdata)
{
	struct csrow_info *csrow;
	struct dimm_info *dimm;

	u32 devtype;
	u32 ctl;

	get_total_mem(pdata);

	ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);

	csrow = mci->csrows[0];
	dimm = csrow->channels[0]->dimm;

	dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
	dimm->grain = 8;

	dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;

	devtype = (ctl >> 20) & 0x3;
	switch (devtype) {
	case 0x0:
		dimm->dtype = DEV_X32;
		break;
	case 0x2:		/* could be X8 too, but no way to tell */
		dimm->dtype = DEV_X16;
		break;
	case 0x3:
		dimm->dtype = DEV_X4;
		break;
	default:
		dimm->dtype = DEV_UNKNOWN;
		break;
	}

	dimm->edac_mode = EDAC_SECDED;
}

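/*
 * Probe sequence for the memory controller: describe a single chip-select
 * by single channel layout to edac_mc_alloc(), map the controller
 * registers, bail out if ECC is not enabled in the SDRAM config register,
 * fill in the csrow/dimm information, clear any latched error and program
 * the ECC control register, register the MC with the EDAC core, and hook
 * the error interrupt when running in interrupt mode.
 */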
static int mv64x60_mc_err_probe(struct platform_device *pdev)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct mv64x60_mc_pdata *pdata;
	struct resource *r;
	u32 ctl;
	int res = 0;

	if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
		return -ENOMEM;

	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = 1;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
			    sizeof(struct mv64x60_mc_pdata));
	if (!mci) {
		printk(KERN_ERR "%s: No memory for MC err\n", __func__);
		devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
		return -ENOMEM;
	}

	pdata = mci->pvt_info;
	mci->pdev = &pdev->dev;
	platform_set_drvdata(pdev, mci);
	pdata->name = "mv64x60_mc_err";
	mci->dev_name = dev_name(&pdev->dev);
	pdata->edac_idx = edac_mc_idx++;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		printk(KERN_ERR "%s: Unable to get resource for "
		       "MC err regs\n", __func__);
		res = -ENOENT;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev,
				     r->start,
				     resource_size(r),
				     pdata->name)) {
		printk(KERN_ERR "%s: Error while requesting mem region\n",
		       __func__);
		res = -EBUSY;
		goto err;
	}

	pdata->mc_vbase = devm_ioremap(&pdev->dev,
				       r->start,
				       resource_size(r));
	if (!pdata->mc_vbase) {
		printk(KERN_ERR "%s: Unable to setup MC err regs\n", __func__);
		res = -ENOMEM;
		goto err;
	}

	ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
	if (!(ctl & MV64X60_SDRAM_ECC)) {
		/* Non-ECC RAM? */
		printk(KERN_WARNING "%s: No ECC DIMMs discovered\n", __func__);
		res = -ENODEV;
		goto err;
	}

	edac_dbg(3, "init mci\n");
	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
	mci->edac_cap = EDAC_FLAG_SECDED;
	mci->mod_name = EDAC_MOD_STR;
	mci->ctl_name = mv64x60_ctl_name;

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check = mv64x60_mc_check;

	mci->ctl_page_to_phys = NULL;

	mci->scrub_mode = SCRUB_SW_SRC;

	mv64x60_init_csrows(mci, pdata);

	/* setup MC registers */
	writel(0, pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR);
	ctl = readl(pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);
	ctl = (ctl & 0xff00ffff) | 0x10000;
	writel(ctl, pdata->mc_vbase + MV64X60_SDRAM_ERR_ECC_CNTL);

	res = edac_mc_add_mc(mci);
	if (res) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto err;
	}

	if (edac_op_state == EDAC_OPSTATE_INT) {
		/* acquire interrupt that reports errors */
		pdata->irq = platform_get_irq(pdev, 0);
		res = devm_request_irq(&pdev->dev,
				       pdata->irq,
				       mv64x60_mc_isr,
				       0,
				       "[EDAC] MC err",
				       mci);
		if (res < 0) {
			printk(KERN_ERR "%s: Unable to request irq %d for "
			       "MV64x60 DRAM ERR\n", __func__, pdata->irq);
			res = -ENODEV;
			goto err2;
		}

		printk(KERN_INFO EDAC_MOD_STR " acquired irq %d for MC Err\n",
		       pdata->irq);
	}

	/* get this far and it's successful */
	edac_dbg(3, "success\n");

	return 0;

err2:
	edac_mc_del_mc(&pdev->dev);
err:
	devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
	edac_mc_free(mci);
	return res;
}

static int mv64x60_mc_err_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	edac_dbg(0, "\n");

	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);
	return 0;
}

static struct platform_driver mv64x60_mc_err_driver = {
	.probe = mv64x60_mc_err_probe,
	.remove = mv64x60_mc_err_remove,
	.driver = {
		   .name = "mv64x60_mc_err",
	}
};

static struct platform_driver * const drivers[] = {
	&mv64x60_mc_err_driver,
	&mv64x60_cpu_err_driver,
	&mv64x60_sram_err_driver,
#ifdef CONFIG_PCI
	&mv64x60_pci_err_driver,
#endif
};

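/*
 * Module init: sanitize the edac_op_state module parameter (anything other
 * than poll or interrupt mode falls back to interrupt mode) and register
 * all of the platform drivers in one go.
 */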
static int __init mv64x60_edac_init(void)
{
	printk(KERN_INFO "Marvell MV64x60 EDAC driver " MV64x60_REVISION "\n");
	printk(KERN_INFO "\t(C) 2006-2007 MontaVista Software\n");

	/* make sure error reporting method is sane */
	switch (edac_op_state) {
	case EDAC_OPSTATE_POLL:
	case EDAC_OPSTATE_INT:
		break;
	default:
		edac_op_state = EDAC_OPSTATE_INT;
		break;
	}

	return platform_register_drivers(drivers, ARRAY_SIZE(drivers));
}
module_init(mv64x60_edac_init);

static void __exit mv64x60_edac_exit(void)
{
	platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
}
module_exit(mv64x60_edac_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("MontaVista Software, Inc.");
module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state,
		 "EDAC Error Reporting state: 0=Poll, 2=Interrupt");
884