xref: /OK3568_Linux_fs/kernel/drivers/edac/aspeed_edac.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright 2018, 2019 Cisco Systems
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/edac.h>
7*4882a593Smuzhiyun #include <linux/module.h>
8*4882a593Smuzhiyun #include <linux/init.h>
9*4882a593Smuzhiyun #include <linux/interrupt.h>
10*4882a593Smuzhiyun #include <linux/platform_device.h>
11*4882a593Smuzhiyun #include <linux/stop_machine.h>
12*4882a593Smuzhiyun #include <linux/io.h>
13*4882a593Smuzhiyun #include <linux/of_address.h>
14*4882a593Smuzhiyun #include <linux/regmap.h>
15*4882a593Smuzhiyun #include "edac_module.h"
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #define DRV_NAME "aspeed-edac"
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #define ASPEED_MCR_PROT        0x00 /* protection key register */
22*4882a593Smuzhiyun #define ASPEED_MCR_CONF        0x04 /* configuration register */
23*4882a593Smuzhiyun #define ASPEED_MCR_INTR_CTRL   0x50 /* interrupt control/status register */
24*4882a593Smuzhiyun #define ASPEED_MCR_ADDR_UNREC  0x58 /* address of first un-recoverable error */
25*4882a593Smuzhiyun #define ASPEED_MCR_ADDR_REC    0x5c /* address of last recoverable error */
26*4882a593Smuzhiyun #define ASPEED_MCR_LAST        ASPEED_MCR_ADDR_REC
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun #define ASPEED_MCR_PROT_PASSWD	            0xfc600309
30*4882a593Smuzhiyun #define ASPEED_MCR_CONF_DRAM_TYPE               BIT(4)
31*4882a593Smuzhiyun #define ASPEED_MCR_CONF_ECC                     BIT(7)
32*4882a593Smuzhiyun #define ASPEED_MCR_INTR_CTRL_CLEAR             BIT(31)
33*4882a593Smuzhiyun #define ASPEED_MCR_INTR_CTRL_CNT_REC   GENMASK(23, 16)
34*4882a593Smuzhiyun #define ASPEED_MCR_INTR_CTRL_CNT_UNREC GENMASK(15, 12)
35*4882a593Smuzhiyun #define ASPEED_MCR_INTR_CTRL_ENABLE  (BIT(0) | BIT(1))
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun static struct regmap *aspeed_regmap;
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun 
regmap_reg_write(void * context,unsigned int reg,unsigned int val)41*4882a593Smuzhiyun static int regmap_reg_write(void *context, unsigned int reg, unsigned int val)
42*4882a593Smuzhiyun {
43*4882a593Smuzhiyun 	void __iomem *regs = (void __iomem *)context;
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun 	/* enable write to MCR register set */
46*4882a593Smuzhiyun 	writel(ASPEED_MCR_PROT_PASSWD, regs + ASPEED_MCR_PROT);
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 	writel(val, regs + reg);
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun 	/* disable write to MCR register set */
51*4882a593Smuzhiyun 	writel(~ASPEED_MCR_PROT_PASSWD, regs + ASPEED_MCR_PROT);
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 	return 0;
54*4882a593Smuzhiyun }
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun 
regmap_reg_read(void * context,unsigned int reg,unsigned int * val)57*4882a593Smuzhiyun static int regmap_reg_read(void *context, unsigned int reg, unsigned int *val)
58*4882a593Smuzhiyun {
59*4882a593Smuzhiyun 	void __iomem *regs = (void __iomem *)context;
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 	*val = readl(regs + reg);
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun 	return 0;
64*4882a593Smuzhiyun }
65*4882a593Smuzhiyun 
regmap_is_volatile(struct device * dev,unsigned int reg)66*4882a593Smuzhiyun static bool regmap_is_volatile(struct device *dev, unsigned int reg)
67*4882a593Smuzhiyun {
68*4882a593Smuzhiyun 	switch (reg) {
69*4882a593Smuzhiyun 	case ASPEED_MCR_PROT:
70*4882a593Smuzhiyun 	case ASPEED_MCR_INTR_CTRL:
71*4882a593Smuzhiyun 	case ASPEED_MCR_ADDR_UNREC:
72*4882a593Smuzhiyun 	case ASPEED_MCR_ADDR_REC:
73*4882a593Smuzhiyun 		return true;
74*4882a593Smuzhiyun 	default:
75*4882a593Smuzhiyun 		return false;
76*4882a593Smuzhiyun 	}
77*4882a593Smuzhiyun }
78*4882a593Smuzhiyun 
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun static const struct regmap_config aspeed_regmap_config = {
81*4882a593Smuzhiyun 	.reg_bits = 32,
82*4882a593Smuzhiyun 	.val_bits = 32,
83*4882a593Smuzhiyun 	.reg_stride = 4,
84*4882a593Smuzhiyun 	.max_register = ASPEED_MCR_LAST,
85*4882a593Smuzhiyun 	.reg_write = regmap_reg_write,
86*4882a593Smuzhiyun 	.reg_read = regmap_reg_read,
87*4882a593Smuzhiyun 	.volatile_reg = regmap_is_volatile,
88*4882a593Smuzhiyun 	.fast_io = true,
89*4882a593Smuzhiyun };
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun 
/*
 * Report @rec_cnt correctable (recoverable) ECC errors to the EDAC core.
 *
 * The controller only latches the address of the most recent
 * recoverable error (@rec_addr); all earlier errors are reported
 * without address, page, offset or syndrome information.
 */
static void count_rec(struct mem_ctl_info *mci, u8 rec_cnt, u32 rec_addr)
{
	struct csrow_info *csrow = mci->csrows[0];
	u32 last_page, last_offset;

	if (rec_cnt == 0)
		return;

	/* all but the newest error: no page/offset/syndrome recorded */
	if (rec_cnt > 1)
		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, rec_cnt - 1,
				     0, 0, 0, 0, 0, -1,
				     "address(es) not available", "");

	/* newest error: rec_addr holds its physical address */
	last_page = rec_addr >> PAGE_SHIFT;
	last_offset = rec_addr & ~PAGE_MASK;
	/* the hardware does not provide a syndrome, report 0 */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
			     csrow->first_page + last_page, last_offset, 0,
			     0, 0, -1, "", "");
}
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 
/*
 * Report @un_rec_cnt uncorrectable (unrecoverable) ECC errors to the
 * EDAC core.
 *
 * The controller only latches the address of the *first* unrecoverable
 * error (@un_rec_addr); any further errors are reported without
 * address, page, offset or syndrome information.
 */
static void count_un_rec(struct mem_ctl_info *mci, u8 un_rec_cnt,
			 u32 un_rec_addr)
{
	struct csrow_info *csrow = mci->csrows[0];
	u32 first_page, first_offset;

	if (un_rec_cnt == 0)
		return;

	/* first error: un_rec_addr holds its physical address */
	first_page = un_rec_addr >> PAGE_SHIFT;
	first_offset = un_rec_addr & ~PAGE_MASK;
	/* the hardware does not provide a syndrome, report 0 */
	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
			     csrow->first_page + first_page, first_offset, 0,
			     0, 0, -1, "", "");

	/* remaining errors: no page/offset/syndrome recorded */
	if (un_rec_cnt > 1)
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
				     un_rec_cnt - 1, 0, 0, 0, 0, 0, -1,
				     "address(es) not available", "");
}
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun 
mcr_isr(int irq,void * arg)157*4882a593Smuzhiyun static irqreturn_t mcr_isr(int irq, void *arg)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun 	struct mem_ctl_info *mci = arg;
160*4882a593Smuzhiyun 	u32 rec_addr, un_rec_addr;
161*4882a593Smuzhiyun 	u32 reg50, reg5c, reg58;
162*4882a593Smuzhiyun 	u8  rec_cnt, un_rec_cnt;
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	regmap_read(aspeed_regmap, ASPEED_MCR_INTR_CTRL, &reg50);
165*4882a593Smuzhiyun 	dev_dbg(mci->pdev, "received edac interrupt w/ mcr register 50: 0x%x\n",
166*4882a593Smuzhiyun 		reg50);
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun 	/* collect data about recoverable and unrecoverable errors */
169*4882a593Smuzhiyun 	rec_cnt = (reg50 & ASPEED_MCR_INTR_CTRL_CNT_REC) >> 16;
170*4882a593Smuzhiyun 	un_rec_cnt = (reg50 & ASPEED_MCR_INTR_CTRL_CNT_UNREC) >> 12;
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 	dev_dbg(mci->pdev, "%d recoverable interrupts and %d unrecoverable interrupts\n",
173*4882a593Smuzhiyun 		rec_cnt, un_rec_cnt);
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 	regmap_read(aspeed_regmap, ASPEED_MCR_ADDR_UNREC, &reg58);
176*4882a593Smuzhiyun 	un_rec_addr = reg58;
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	regmap_read(aspeed_regmap, ASPEED_MCR_ADDR_REC, &reg5c);
179*4882a593Smuzhiyun 	rec_addr = reg5c;
180*4882a593Smuzhiyun 
181*4882a593Smuzhiyun 	/* clear interrupt flags and error counters: */
182*4882a593Smuzhiyun 	regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
183*4882a593Smuzhiyun 			   ASPEED_MCR_INTR_CTRL_CLEAR,
184*4882a593Smuzhiyun 			   ASPEED_MCR_INTR_CTRL_CLEAR);
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
187*4882a593Smuzhiyun 			   ASPEED_MCR_INTR_CTRL_CLEAR, 0);
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun 	/* process recoverable and unrecoverable errors */
190*4882a593Smuzhiyun 	count_rec(mci, rec_cnt, rec_addr);
191*4882a593Smuzhiyun 	count_un_rec(mci, un_rec_cnt, un_rec_addr);
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	if (!rec_cnt && !un_rec_cnt)
194*4882a593Smuzhiyun 		dev_dbg(mci->pdev, "received edac interrupt, but did not find any ECC counters\n");
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	regmap_read(aspeed_regmap, ASPEED_MCR_INTR_CTRL, &reg50);
197*4882a593Smuzhiyun 	dev_dbg(mci->pdev, "edac interrupt handled. mcr reg 50 is now: 0x%x\n",
198*4882a593Smuzhiyun 		reg50);
199*4882a593Smuzhiyun 
200*4882a593Smuzhiyun 	return IRQ_HANDLED;
201*4882a593Smuzhiyun }
202*4882a593Smuzhiyun 
203*4882a593Smuzhiyun 
config_irq(void * ctx,struct platform_device * pdev)204*4882a593Smuzhiyun static int config_irq(void *ctx, struct platform_device *pdev)
205*4882a593Smuzhiyun {
206*4882a593Smuzhiyun 	int irq;
207*4882a593Smuzhiyun 	int rc;
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	/* register interrupt handler */
210*4882a593Smuzhiyun 	irq = platform_get_irq(pdev, 0);
211*4882a593Smuzhiyun 	dev_dbg(&pdev->dev, "got irq %d\n", irq);
212*4882a593Smuzhiyun 	if (irq < 0)
213*4882a593Smuzhiyun 		return irq;
214*4882a593Smuzhiyun 
215*4882a593Smuzhiyun 	rc = devm_request_irq(&pdev->dev, irq, mcr_isr, IRQF_TRIGGER_HIGH,
216*4882a593Smuzhiyun 			      DRV_NAME, ctx);
217*4882a593Smuzhiyun 	if (rc) {
218*4882a593Smuzhiyun 		dev_err(&pdev->dev, "unable to request irq %d\n", irq);
219*4882a593Smuzhiyun 		return rc;
220*4882a593Smuzhiyun 	}
221*4882a593Smuzhiyun 
222*4882a593Smuzhiyun 	/* enable interrupts */
223*4882a593Smuzhiyun 	regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
224*4882a593Smuzhiyun 			   ASPEED_MCR_INTR_CTRL_ENABLE,
225*4882a593Smuzhiyun 			   ASPEED_MCR_INTR_CTRL_ENABLE);
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 	return 0;
228*4882a593Smuzhiyun }
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun 
init_csrows(struct mem_ctl_info * mci)231*4882a593Smuzhiyun static int init_csrows(struct mem_ctl_info *mci)
232*4882a593Smuzhiyun {
233*4882a593Smuzhiyun 	struct csrow_info *csrow = mci->csrows[0];
234*4882a593Smuzhiyun 	u32 nr_pages, dram_type;
235*4882a593Smuzhiyun 	struct dimm_info *dimm;
236*4882a593Smuzhiyun 	struct device_node *np;
237*4882a593Smuzhiyun 	struct resource r;
238*4882a593Smuzhiyun 	u32 reg04;
239*4882a593Smuzhiyun 	int rc;
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 	/* retrieve info about physical memory from device tree */
242*4882a593Smuzhiyun 	np = of_find_node_by_path("/memory");
243*4882a593Smuzhiyun 	if (!np) {
244*4882a593Smuzhiyun 		dev_err(mci->pdev, "dt: missing /memory node\n");
245*4882a593Smuzhiyun 		return -ENODEV;
246*4882a593Smuzhiyun 	}
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 	rc = of_address_to_resource(np, 0, &r);
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun 	of_node_put(np);
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 	if (rc) {
253*4882a593Smuzhiyun 		dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n");
254*4882a593Smuzhiyun 		return rc;
255*4882a593Smuzhiyun 	}
256*4882a593Smuzhiyun 
257*4882a593Smuzhiyun 	dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n",
258*4882a593Smuzhiyun 		r.start, resource_size(&r), PAGE_SHIFT);
259*4882a593Smuzhiyun 
260*4882a593Smuzhiyun 	csrow->first_page = r.start >> PAGE_SHIFT;
261*4882a593Smuzhiyun 	nr_pages = resource_size(&r) >> PAGE_SHIFT;
262*4882a593Smuzhiyun 	csrow->last_page = csrow->first_page + nr_pages - 1;
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun 	regmap_read(aspeed_regmap, ASPEED_MCR_CONF, &reg04);
265*4882a593Smuzhiyun 	dram_type = (reg04 & ASPEED_MCR_CONF_DRAM_TYPE) ? MEM_DDR4 : MEM_DDR3;
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun 	dimm = csrow->channels[0]->dimm;
268*4882a593Smuzhiyun 	dimm->mtype = dram_type;
269*4882a593Smuzhiyun 	dimm->edac_mode = EDAC_SECDED;
270*4882a593Smuzhiyun 	dimm->nr_pages = nr_pages / csrow->nr_channels;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	dev_dbg(mci->pdev, "initialized dimm with first_page=0x%lx and nr_pages=0x%x\n",
273*4882a593Smuzhiyun 		csrow->first_page, nr_pages);
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun 	return 0;
276*4882a593Smuzhiyun }
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 
aspeed_probe(struct platform_device * pdev)279*4882a593Smuzhiyun static int aspeed_probe(struct platform_device *pdev)
280*4882a593Smuzhiyun {
281*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
282*4882a593Smuzhiyun 	struct edac_mc_layer layers[2];
283*4882a593Smuzhiyun 	struct mem_ctl_info *mci;
284*4882a593Smuzhiyun 	void __iomem *regs;
285*4882a593Smuzhiyun 	u32 reg04;
286*4882a593Smuzhiyun 	int rc;
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun 	regs = devm_platform_ioremap_resource(pdev, 0);
289*4882a593Smuzhiyun 	if (IS_ERR(regs))
290*4882a593Smuzhiyun 		return PTR_ERR(regs);
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun 	aspeed_regmap = devm_regmap_init(dev, NULL, (__force void *)regs,
293*4882a593Smuzhiyun 					 &aspeed_regmap_config);
294*4882a593Smuzhiyun 	if (IS_ERR(aspeed_regmap))
295*4882a593Smuzhiyun 		return PTR_ERR(aspeed_regmap);
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	/* bail out if ECC mode is not configured */
298*4882a593Smuzhiyun 	regmap_read(aspeed_regmap, ASPEED_MCR_CONF, &reg04);
299*4882a593Smuzhiyun 	if (!(reg04 & ASPEED_MCR_CONF_ECC)) {
300*4882a593Smuzhiyun 		dev_err(&pdev->dev, "ECC mode is not configured in u-boot\n");
301*4882a593Smuzhiyun 		return -EPERM;
302*4882a593Smuzhiyun 	}
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	edac_op_state = EDAC_OPSTATE_INT;
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	/* allocate & init EDAC MC data structure */
307*4882a593Smuzhiyun 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
308*4882a593Smuzhiyun 	layers[0].size = 1;
309*4882a593Smuzhiyun 	layers[0].is_virt_csrow = true;
310*4882a593Smuzhiyun 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
311*4882a593Smuzhiyun 	layers[1].size = 1;
312*4882a593Smuzhiyun 	layers[1].is_virt_csrow = false;
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
315*4882a593Smuzhiyun 	if (!mci)
316*4882a593Smuzhiyun 		return -ENOMEM;
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun 	mci->pdev = &pdev->dev;
319*4882a593Smuzhiyun 	mci->mtype_cap = MEM_FLAG_DDR3 | MEM_FLAG_DDR4;
320*4882a593Smuzhiyun 	mci->edac_ctl_cap = EDAC_FLAG_SECDED;
321*4882a593Smuzhiyun 	mci->edac_cap = EDAC_FLAG_SECDED;
322*4882a593Smuzhiyun 	mci->scrub_cap = SCRUB_FLAG_HW_SRC;
323*4882a593Smuzhiyun 	mci->scrub_mode = SCRUB_HW_SRC;
324*4882a593Smuzhiyun 	mci->mod_name = DRV_NAME;
325*4882a593Smuzhiyun 	mci->ctl_name = "MIC";
326*4882a593Smuzhiyun 	mci->dev_name = dev_name(&pdev->dev);
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	rc = init_csrows(mci);
329*4882a593Smuzhiyun 	if (rc) {
330*4882a593Smuzhiyun 		dev_err(&pdev->dev, "failed to init csrows\n");
331*4882a593Smuzhiyun 		goto probe_exit02;
332*4882a593Smuzhiyun 	}
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun 	platform_set_drvdata(pdev, mci);
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	/* register with edac core */
337*4882a593Smuzhiyun 	rc = edac_mc_add_mc(mci);
338*4882a593Smuzhiyun 	if (rc) {
339*4882a593Smuzhiyun 		dev_err(&pdev->dev, "failed to register with EDAC core\n");
340*4882a593Smuzhiyun 		goto probe_exit02;
341*4882a593Smuzhiyun 	}
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 	/* register interrupt handler and enable interrupts */
344*4882a593Smuzhiyun 	rc = config_irq(mci, pdev);
345*4882a593Smuzhiyun 	if (rc) {
346*4882a593Smuzhiyun 		dev_err(&pdev->dev, "failed setting up irq\n");
347*4882a593Smuzhiyun 		goto probe_exit01;
348*4882a593Smuzhiyun 	}
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun 	return 0;
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun probe_exit01:
353*4882a593Smuzhiyun 	edac_mc_del_mc(&pdev->dev);
354*4882a593Smuzhiyun probe_exit02:
355*4882a593Smuzhiyun 	edac_mc_free(mci);
356*4882a593Smuzhiyun 	return rc;
357*4882a593Smuzhiyun }
358*4882a593Smuzhiyun 
359*4882a593Smuzhiyun 
aspeed_remove(struct platform_device * pdev)360*4882a593Smuzhiyun static int aspeed_remove(struct platform_device *pdev)
361*4882a593Smuzhiyun {
362*4882a593Smuzhiyun 	struct mem_ctl_info *mci;
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun 	/* disable interrupts */
365*4882a593Smuzhiyun 	regmap_update_bits(aspeed_regmap, ASPEED_MCR_INTR_CTRL,
366*4882a593Smuzhiyun 			   ASPEED_MCR_INTR_CTRL_ENABLE, 0);
367*4882a593Smuzhiyun 
368*4882a593Smuzhiyun 	/* free resources */
369*4882a593Smuzhiyun 	mci = edac_mc_del_mc(&pdev->dev);
370*4882a593Smuzhiyun 	if (mci)
371*4882a593Smuzhiyun 		edac_mc_free(mci);
372*4882a593Smuzhiyun 
373*4882a593Smuzhiyun 	return 0;
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun static const struct of_device_id aspeed_of_match[] = {
378*4882a593Smuzhiyun 	{ .compatible = "aspeed,ast2500-sdram-edac" },
379*4882a593Smuzhiyun 	{},
380*4882a593Smuzhiyun };
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun static struct platform_driver aspeed_driver = {
384*4882a593Smuzhiyun 	.driver		= {
385*4882a593Smuzhiyun 		.name	= DRV_NAME,
386*4882a593Smuzhiyun 		.of_match_table = aspeed_of_match
387*4882a593Smuzhiyun 	},
388*4882a593Smuzhiyun 	.probe		= aspeed_probe,
389*4882a593Smuzhiyun 	.remove		= aspeed_remove
390*4882a593Smuzhiyun };
391*4882a593Smuzhiyun module_platform_driver(aspeed_driver);
392*4882a593Smuzhiyun 
393*4882a593Smuzhiyun MODULE_LICENSE("GPL");
394*4882a593Smuzhiyun MODULE_AUTHOR("Stefan Schaeckeler <sschaeck@cisco.com>");
395*4882a593Smuzhiyun MODULE_DESCRIPTION("Aspeed AST2500 EDAC driver");
396*4882a593Smuzhiyun MODULE_VERSION("1.0");
397