xref: /OK3568_Linux_fs/kernel/drivers/edac/cpc925_edac.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * cpc925_edac.c, EDAC driver for IBM CPC925 Bridge and Memory Controller.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2008 Wind River Systems, Inc.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Authors:	Cao Qingtao <qingtao.cao@windriver.com>
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/module.h>
11*4882a593Smuzhiyun #include <linux/init.h>
12*4882a593Smuzhiyun #include <linux/io.h>
13*4882a593Smuzhiyun #include <linux/edac.h>
14*4882a593Smuzhiyun #include <linux/of.h>
15*4882a593Smuzhiyun #include <linux/platform_device.h>
16*4882a593Smuzhiyun #include <linux/gfp.h>
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include "edac_module.h"
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #define CPC925_EDAC_REVISION	" Ver: 1.0.0"
21*4882a593Smuzhiyun #define CPC925_EDAC_MOD_STR	"cpc925_edac"
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #define cpc925_printk(level, fmt, arg...) \
24*4882a593Smuzhiyun 	edac_printk(level, "CPC925", fmt, ##arg)
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun #define cpc925_mc_printk(mci, level, fmt, arg...) \
27*4882a593Smuzhiyun 	edac_mc_chipset_printk(mci, level, "CPC925", fmt, ##arg)
28*4882a593Smuzhiyun 
/*
 * CPC925 registers are of 32 bits with bit0 defined at the
 * most significant bit and bit31 at that of least significant
 * (IBM big-endian bit numbering).
 */
#define CPC925_BITS_PER_REG	32
/* Parenthesize nr so expressions such as CPC925_BIT(a + b) expand correctly */
#define CPC925_BIT(nr)		(1UL << (CPC925_BITS_PER_REG - 1 - (nr)))
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun /*
37*4882a593Smuzhiyun  * EDAC device names for the error detections of
38*4882a593Smuzhiyun  * CPU Interface and Hypertransport Link.
39*4882a593Smuzhiyun  */
40*4882a593Smuzhiyun #define CPC925_CPU_ERR_DEV	"cpu"
41*4882a593Smuzhiyun #define CPC925_HT_LINK_DEV	"htlink"
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun /* Suppose DDR Refresh cycle is 15.6 microsecond */
44*4882a593Smuzhiyun #define CPC925_REF_FREQ		0xFA69
45*4882a593Smuzhiyun #define CPC925_SCRUB_BLOCK_SIZE 64	/* bytes */
46*4882a593Smuzhiyun #define CPC925_NR_CSROWS	8
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun /*
49*4882a593Smuzhiyun  * All registers and bits definitions are taken from
50*4882a593Smuzhiyun  * "CPC925 Bridge and Memory Controller User Manual, SA14-2761-02".
51*4882a593Smuzhiyun  */
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun /*
54*4882a593Smuzhiyun  * CPU and Memory Controller Registers
55*4882a593Smuzhiyun  */
56*4882a593Smuzhiyun /************************************************************
57*4882a593Smuzhiyun  *	Processor Interface Exception Mask Register (APIMASK)
58*4882a593Smuzhiyun  ************************************************************/
59*4882a593Smuzhiyun #define REG_APIMASK_OFFSET	0x30070
60*4882a593Smuzhiyun enum apimask_bits {
61*4882a593Smuzhiyun 	APIMASK_DART	= CPC925_BIT(0), /* DART Exception */
62*4882a593Smuzhiyun 	APIMASK_ADI0	= CPC925_BIT(1), /* Handshake Error on PI0_ADI */
63*4882a593Smuzhiyun 	APIMASK_ADI1	= CPC925_BIT(2), /* Handshake Error on PI1_ADI */
64*4882a593Smuzhiyun 	APIMASK_STAT	= CPC925_BIT(3), /* Status Exception */
65*4882a593Smuzhiyun 	APIMASK_DERR	= CPC925_BIT(4), /* Data Error Exception */
66*4882a593Smuzhiyun 	APIMASK_ADRS0	= CPC925_BIT(5), /* Addressing Exception on PI0 */
67*4882a593Smuzhiyun 	APIMASK_ADRS1	= CPC925_BIT(6), /* Addressing Exception on PI1 */
68*4882a593Smuzhiyun 					 /* BIT(7) Reserved */
69*4882a593Smuzhiyun 	APIMASK_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
70*4882a593Smuzhiyun 	APIMASK_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
71*4882a593Smuzhiyun 	APIMASK_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
72*4882a593Smuzhiyun 	APIMASK_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun 	CPU_MASK_ENABLE = (APIMASK_DART | APIMASK_ADI0 | APIMASK_ADI1 |
75*4882a593Smuzhiyun 			   APIMASK_STAT | APIMASK_DERR | APIMASK_ADRS0 |
76*4882a593Smuzhiyun 			   APIMASK_ADRS1),
77*4882a593Smuzhiyun 	ECC_MASK_ENABLE = (APIMASK_ECC_UE_H | APIMASK_ECC_CE_H |
78*4882a593Smuzhiyun 			   APIMASK_ECC_UE_L | APIMASK_ECC_CE_L),
79*4882a593Smuzhiyun };
80*4882a593Smuzhiyun #define APIMASK_ADI(n)		CPC925_BIT(((n)+1))
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun /************************************************************
83*4882a593Smuzhiyun  *	Processor Interface Exception Register (APIEXCP)
84*4882a593Smuzhiyun  ************************************************************/
85*4882a593Smuzhiyun #define REG_APIEXCP_OFFSET	0x30060
86*4882a593Smuzhiyun enum apiexcp_bits {
87*4882a593Smuzhiyun 	APIEXCP_DART	= CPC925_BIT(0), /* DART Exception */
88*4882a593Smuzhiyun 	APIEXCP_ADI0	= CPC925_BIT(1), /* Handshake Error on PI0_ADI */
89*4882a593Smuzhiyun 	APIEXCP_ADI1	= CPC925_BIT(2), /* Handshake Error on PI1_ADI */
90*4882a593Smuzhiyun 	APIEXCP_STAT	= CPC925_BIT(3), /* Status Exception */
91*4882a593Smuzhiyun 	APIEXCP_DERR	= CPC925_BIT(4), /* Data Error Exception */
92*4882a593Smuzhiyun 	APIEXCP_ADRS0	= CPC925_BIT(5), /* Addressing Exception on PI0 */
93*4882a593Smuzhiyun 	APIEXCP_ADRS1	= CPC925_BIT(6), /* Addressing Exception on PI1 */
94*4882a593Smuzhiyun 					 /* BIT(7) Reserved */
95*4882a593Smuzhiyun 	APIEXCP_ECC_UE_H = CPC925_BIT(8), /* UECC upper */
96*4882a593Smuzhiyun 	APIEXCP_ECC_CE_H = CPC925_BIT(9), /* CECC upper */
97*4882a593Smuzhiyun 	APIEXCP_ECC_UE_L = CPC925_BIT(10), /* UECC lower */
98*4882a593Smuzhiyun 	APIEXCP_ECC_CE_L = CPC925_BIT(11), /* CECC lower */
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun 	CPU_EXCP_DETECTED = (APIEXCP_DART | APIEXCP_ADI0 | APIEXCP_ADI1 |
101*4882a593Smuzhiyun 			     APIEXCP_STAT | APIEXCP_DERR | APIEXCP_ADRS0 |
102*4882a593Smuzhiyun 			     APIEXCP_ADRS1),
103*4882a593Smuzhiyun 	UECC_EXCP_DETECTED = (APIEXCP_ECC_UE_H | APIEXCP_ECC_UE_L),
104*4882a593Smuzhiyun 	CECC_EXCP_DETECTED = (APIEXCP_ECC_CE_H | APIEXCP_ECC_CE_L),
105*4882a593Smuzhiyun 	ECC_EXCP_DETECTED = (UECC_EXCP_DETECTED | CECC_EXCP_DETECTED),
106*4882a593Smuzhiyun };
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun /************************************************************
109*4882a593Smuzhiyun  *	Memory Bus Configuration Register (MBCR)
110*4882a593Smuzhiyun ************************************************************/
111*4882a593Smuzhiyun #define REG_MBCR_OFFSET		0x2190
112*4882a593Smuzhiyun #define MBCR_64BITCFG_SHIFT	23
113*4882a593Smuzhiyun #define MBCR_64BITCFG_MASK	(1UL << MBCR_64BITCFG_SHIFT)
114*4882a593Smuzhiyun #define MBCR_64BITBUS_SHIFT	22
115*4882a593Smuzhiyun #define MBCR_64BITBUS_MASK	(1UL << MBCR_64BITBUS_SHIFT)
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun /************************************************************
118*4882a593Smuzhiyun  *	Memory Bank Mode Register (MBMR)
119*4882a593Smuzhiyun ************************************************************/
120*4882a593Smuzhiyun #define REG_MBMR_OFFSET		0x21C0
121*4882a593Smuzhiyun #define MBMR_MODE_MAX_VALUE	0xF
122*4882a593Smuzhiyun #define MBMR_MODE_SHIFT		25
123*4882a593Smuzhiyun #define MBMR_MODE_MASK		(MBMR_MODE_MAX_VALUE << MBMR_MODE_SHIFT)
124*4882a593Smuzhiyun #define MBMR_BBA_SHIFT		24
125*4882a593Smuzhiyun #define MBMR_BBA_MASK		(1UL << MBMR_BBA_SHIFT)
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun /************************************************************
128*4882a593Smuzhiyun  *	Memory Bank Boundary Address Register (MBBAR)
129*4882a593Smuzhiyun  ************************************************************/
130*4882a593Smuzhiyun #define REG_MBBAR_OFFSET	0x21D0
131*4882a593Smuzhiyun #define MBBAR_BBA_MAX_VALUE	0xFF
132*4882a593Smuzhiyun #define MBBAR_BBA_SHIFT		24
133*4882a593Smuzhiyun #define MBBAR_BBA_MASK		(MBBAR_BBA_MAX_VALUE << MBBAR_BBA_SHIFT)
134*4882a593Smuzhiyun 
135*4882a593Smuzhiyun /************************************************************
136*4882a593Smuzhiyun  *	Memory Scrub Control Register (MSCR)
137*4882a593Smuzhiyun  ************************************************************/
138*4882a593Smuzhiyun #define REG_MSCR_OFFSET		0x2400
139*4882a593Smuzhiyun #define MSCR_SCRUB_MOD_MASK	0xC0000000 /* scrub_mod - bit0:1*/
140*4882a593Smuzhiyun #define MSCR_BACKGR_SCRUB	0x40000000 /* 01 */
141*4882a593Smuzhiyun #define MSCR_SI_SHIFT		16 	/* si - bit8:15*/
142*4882a593Smuzhiyun #define MSCR_SI_MAX_VALUE	0xFF
143*4882a593Smuzhiyun #define MSCR_SI_MASK		(MSCR_SI_MAX_VALUE << MSCR_SI_SHIFT)
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun /************************************************************
146*4882a593Smuzhiyun  *	Memory Scrub Range Start Register (MSRSR)
147*4882a593Smuzhiyun  ************************************************************/
148*4882a593Smuzhiyun #define REG_MSRSR_OFFSET	0x2410
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun /************************************************************
151*4882a593Smuzhiyun  *	Memory Scrub Range End Register (MSRER)
152*4882a593Smuzhiyun  ************************************************************/
153*4882a593Smuzhiyun #define REG_MSRER_OFFSET	0x2420
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun /************************************************************
156*4882a593Smuzhiyun  *	Memory Scrub Pattern Register (MSPR)
157*4882a593Smuzhiyun  ************************************************************/
158*4882a593Smuzhiyun #define REG_MSPR_OFFSET		0x2430
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun /************************************************************
161*4882a593Smuzhiyun  *	Memory Check Control Register (MCCR)
162*4882a593Smuzhiyun  ************************************************************/
163*4882a593Smuzhiyun #define REG_MCCR_OFFSET		0x2440
164*4882a593Smuzhiyun enum mccr_bits {
165*4882a593Smuzhiyun 	MCCR_ECC_EN	= CPC925_BIT(0), /* ECC high and low check */
166*4882a593Smuzhiyun };
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun /************************************************************
169*4882a593Smuzhiyun  *	Memory Check Range End Register (MCRER)
170*4882a593Smuzhiyun  ************************************************************/
171*4882a593Smuzhiyun #define REG_MCRER_OFFSET	0x2450
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun /************************************************************
174*4882a593Smuzhiyun  *	Memory Error Address Register (MEAR)
175*4882a593Smuzhiyun  ************************************************************/
176*4882a593Smuzhiyun #define REG_MEAR_OFFSET		0x2460
177*4882a593Smuzhiyun #define MEAR_BCNT_MAX_VALUE	0x3
178*4882a593Smuzhiyun #define MEAR_BCNT_SHIFT		30
179*4882a593Smuzhiyun #define MEAR_BCNT_MASK		(MEAR_BCNT_MAX_VALUE << MEAR_BCNT_SHIFT)
180*4882a593Smuzhiyun #define MEAR_RANK_MAX_VALUE	0x7
181*4882a593Smuzhiyun #define MEAR_RANK_SHIFT		27
182*4882a593Smuzhiyun #define MEAR_RANK_MASK		(MEAR_RANK_MAX_VALUE << MEAR_RANK_SHIFT)
183*4882a593Smuzhiyun #define MEAR_COL_MAX_VALUE	0x7FF
184*4882a593Smuzhiyun #define MEAR_COL_SHIFT		16
185*4882a593Smuzhiyun #define MEAR_COL_MASK		(MEAR_COL_MAX_VALUE << MEAR_COL_SHIFT)
186*4882a593Smuzhiyun #define MEAR_BANK_MAX_VALUE	0x3
187*4882a593Smuzhiyun #define MEAR_BANK_SHIFT		14
188*4882a593Smuzhiyun #define MEAR_BANK_MASK		(MEAR_BANK_MAX_VALUE << MEAR_BANK_SHIFT)
189*4882a593Smuzhiyun #define MEAR_ROW_MASK		0x00003FFF
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun /************************************************************
192*4882a593Smuzhiyun  *	Memory Error Syndrome Register (MESR)
193*4882a593Smuzhiyun  ************************************************************/
194*4882a593Smuzhiyun #define REG_MESR_OFFSET		0x2470
195*4882a593Smuzhiyun #define MESR_ECC_SYN_H_MASK	0xFF00
196*4882a593Smuzhiyun #define MESR_ECC_SYN_L_MASK	0x00FF
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun /************************************************************
199*4882a593Smuzhiyun  *	Memory Mode Control Register (MMCR)
200*4882a593Smuzhiyun  ************************************************************/
201*4882a593Smuzhiyun #define REG_MMCR_OFFSET		0x2500
202*4882a593Smuzhiyun enum mmcr_bits {
203*4882a593Smuzhiyun 	MMCR_REG_DIMM_MODE = CPC925_BIT(3),
204*4882a593Smuzhiyun };
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun /*
207*4882a593Smuzhiyun  * HyperTransport Link Registers
208*4882a593Smuzhiyun  */
209*4882a593Smuzhiyun /************************************************************
210*4882a593Smuzhiyun  *  Error Handling/Enumeration Scratch Pad Register (ERRCTRL)
211*4882a593Smuzhiyun  ************************************************************/
212*4882a593Smuzhiyun #define REG_ERRCTRL_OFFSET	0x70140
213*4882a593Smuzhiyun enum errctrl_bits {			 /* nonfatal interrupts for */
214*4882a593Smuzhiyun 	ERRCTRL_SERR_NF	= CPC925_BIT(0), /* system error */
215*4882a593Smuzhiyun 	ERRCTRL_CRC_NF	= CPC925_BIT(1), /* CRC error */
216*4882a593Smuzhiyun 	ERRCTRL_RSP_NF	= CPC925_BIT(2), /* Response error */
217*4882a593Smuzhiyun 	ERRCTRL_EOC_NF	= CPC925_BIT(3), /* End-Of-Chain error */
218*4882a593Smuzhiyun 	ERRCTRL_OVF_NF	= CPC925_BIT(4), /* Overflow error */
219*4882a593Smuzhiyun 	ERRCTRL_PROT_NF	= CPC925_BIT(5), /* Protocol error */
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun 	ERRCTRL_RSP_ERR	= CPC925_BIT(6), /* Response error received */
222*4882a593Smuzhiyun 	ERRCTRL_CHN_FAL = CPC925_BIT(7), /* Sync flooding detected */
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	HT_ERRCTRL_ENABLE = (ERRCTRL_SERR_NF | ERRCTRL_CRC_NF |
225*4882a593Smuzhiyun 			     ERRCTRL_RSP_NF | ERRCTRL_EOC_NF |
226*4882a593Smuzhiyun 			     ERRCTRL_OVF_NF | ERRCTRL_PROT_NF),
227*4882a593Smuzhiyun 	HT_ERRCTRL_DETECTED = (ERRCTRL_RSP_ERR | ERRCTRL_CHN_FAL),
228*4882a593Smuzhiyun };
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun /************************************************************
231*4882a593Smuzhiyun  *  Link Configuration and Link Control Register (LINKCTRL)
232*4882a593Smuzhiyun  ************************************************************/
233*4882a593Smuzhiyun #define REG_LINKCTRL_OFFSET	0x70110
234*4882a593Smuzhiyun enum linkctrl_bits {
235*4882a593Smuzhiyun 	LINKCTRL_CRC_ERR	= (CPC925_BIT(22) | CPC925_BIT(23)),
236*4882a593Smuzhiyun 	LINKCTRL_LINK_FAIL	= CPC925_BIT(27),
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun 	HT_LINKCTRL_DETECTED	= (LINKCTRL_CRC_ERR | LINKCTRL_LINK_FAIL),
239*4882a593Smuzhiyun };
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun /************************************************************
242*4882a593Smuzhiyun  *  Link FreqCap/Error/Freq/Revision ID Register (LINKERR)
243*4882a593Smuzhiyun  ************************************************************/
244*4882a593Smuzhiyun #define REG_LINKERR_OFFSET	0x70120
245*4882a593Smuzhiyun enum linkerr_bits {
246*4882a593Smuzhiyun 	LINKERR_EOC_ERR		= CPC925_BIT(17), /* End-Of-Chain error */
247*4882a593Smuzhiyun 	LINKERR_OVF_ERR		= CPC925_BIT(18), /* Receive Buffer Overflow */
248*4882a593Smuzhiyun 	LINKERR_PROT_ERR	= CPC925_BIT(19), /* Protocol error */
249*4882a593Smuzhiyun 
250*4882a593Smuzhiyun 	HT_LINKERR_DETECTED	= (LINKERR_EOC_ERR | LINKERR_OVF_ERR |
251*4882a593Smuzhiyun 				   LINKERR_PROT_ERR),
252*4882a593Smuzhiyun };
253*4882a593Smuzhiyun 
254*4882a593Smuzhiyun /************************************************************
255*4882a593Smuzhiyun  *	Bridge Control Register (BRGCTRL)
256*4882a593Smuzhiyun  ************************************************************/
257*4882a593Smuzhiyun #define REG_BRGCTRL_OFFSET	0x70300
258*4882a593Smuzhiyun enum brgctrl_bits {
259*4882a593Smuzhiyun 	BRGCTRL_DETSERR = CPC925_BIT(0), /* SERR on Secondary Bus */
260*4882a593Smuzhiyun 	BRGCTRL_SECBUSRESET = CPC925_BIT(9), /* Secondary Bus Reset */
261*4882a593Smuzhiyun };
262*4882a593Smuzhiyun 
/* Private structure for the EDAC memory controller (mci->pvt_info) */
struct cpc925_mc_pdata {
	void __iomem *vbase;		/* ioremapped base of bridge registers */
	unsigned long total_mem;	/* total memory size, summed from the DT "memory" node */
	const char *name;		/* controller name */
	int edac_idx;			/* EDAC memory controller index */
};
270*4882a593Smuzhiyun 
/* Private structure for common edac device (CPU interface / HT link) */
struct cpc925_dev_info {
	void __iomem *vbase;		/* ioremapped base of bridge registers */
	struct platform_device *pdev;	/* owning platform device */
	char *ctl_name;			/* device name; presumably CPC925_CPU_ERR_DEV or CPC925_HT_LINK_DEV — confirm at registration site */
	int edac_idx;			/* EDAC device instance index */
	struct edac_device_ctl_info *edac_dev;	/* registered EDAC device handle */
	void (*init)(struct cpc925_dev_info *dev_info);	/* enable error detection */
	void (*exit)(struct cpc925_dev_info *dev_info);	/* disable error detection */
	void (*check)(struct edac_device_ctl_info *edac_dev);	/* poll for errors */
};
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun /* Get total memory size from Open Firmware DTB */
get_total_mem(struct cpc925_mc_pdata * pdata)284*4882a593Smuzhiyun static void get_total_mem(struct cpc925_mc_pdata *pdata)
285*4882a593Smuzhiyun {
286*4882a593Smuzhiyun 	struct device_node *np = NULL;
287*4882a593Smuzhiyun 	const unsigned int *reg, *reg_end;
288*4882a593Smuzhiyun 	int len, sw, aw;
289*4882a593Smuzhiyun 	unsigned long start, size;
290*4882a593Smuzhiyun 
291*4882a593Smuzhiyun 	np = of_find_node_by_type(NULL, "memory");
292*4882a593Smuzhiyun 	if (!np)
293*4882a593Smuzhiyun 		return;
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 	aw = of_n_addr_cells(np);
296*4882a593Smuzhiyun 	sw = of_n_size_cells(np);
297*4882a593Smuzhiyun 	reg = (const unsigned int *)of_get_property(np, "reg", &len);
298*4882a593Smuzhiyun 	reg_end = reg + len/4;
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun 	pdata->total_mem = 0;
301*4882a593Smuzhiyun 	do {
302*4882a593Smuzhiyun 		start = of_read_number(reg, aw);
303*4882a593Smuzhiyun 		reg += aw;
304*4882a593Smuzhiyun 		size = of_read_number(reg, sw);
305*4882a593Smuzhiyun 		reg += sw;
306*4882a593Smuzhiyun 		edac_dbg(1, "start 0x%lx, size 0x%lx\n", start, size);
307*4882a593Smuzhiyun 		pdata->total_mem += size;
308*4882a593Smuzhiyun 	} while (reg < reg_end);
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	of_node_put(np);
311*4882a593Smuzhiyun 	edac_dbg(0, "total_mem 0x%lx\n", pdata->total_mem);
312*4882a593Smuzhiyun }
313*4882a593Smuzhiyun 
cpc925_init_csrows(struct mem_ctl_info * mci)314*4882a593Smuzhiyun static void cpc925_init_csrows(struct mem_ctl_info *mci)
315*4882a593Smuzhiyun {
316*4882a593Smuzhiyun 	struct cpc925_mc_pdata *pdata = mci->pvt_info;
317*4882a593Smuzhiyun 	struct csrow_info *csrow;
318*4882a593Smuzhiyun 	struct dimm_info *dimm;
319*4882a593Smuzhiyun 	enum dev_type dtype;
320*4882a593Smuzhiyun 	int index, j;
321*4882a593Smuzhiyun 	u32 mbmr, mbbar, bba, grain;
322*4882a593Smuzhiyun 	unsigned long row_size, nr_pages, last_nr_pages = 0;
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	get_total_mem(pdata);
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	for (index = 0; index < mci->nr_csrows; index++) {
327*4882a593Smuzhiyun 		mbmr = __raw_readl(pdata->vbase + REG_MBMR_OFFSET +
328*4882a593Smuzhiyun 				   0x20 * index);
329*4882a593Smuzhiyun 		mbbar = __raw_readl(pdata->vbase + REG_MBBAR_OFFSET +
330*4882a593Smuzhiyun 				   0x20 + index);
331*4882a593Smuzhiyun 		bba = (((mbmr & MBMR_BBA_MASK) >> MBMR_BBA_SHIFT) << 8) |
332*4882a593Smuzhiyun 		       ((mbbar & MBBAR_BBA_MASK) >> MBBAR_BBA_SHIFT);
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun 		if (bba == 0)
335*4882a593Smuzhiyun 			continue; /* not populated */
336*4882a593Smuzhiyun 
337*4882a593Smuzhiyun 		csrow = mci->csrows[index];
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 		row_size = bba * (1UL << 28);	/* 256M */
340*4882a593Smuzhiyun 		csrow->first_page = last_nr_pages;
341*4882a593Smuzhiyun 		nr_pages = row_size >> PAGE_SHIFT;
342*4882a593Smuzhiyun 		csrow->last_page = csrow->first_page + nr_pages - 1;
343*4882a593Smuzhiyun 		last_nr_pages = csrow->last_page + 1;
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 		switch (csrow->nr_channels) {
346*4882a593Smuzhiyun 		case 1: /* Single channel */
347*4882a593Smuzhiyun 			grain = 32; /* four-beat burst of 32 bytes */
348*4882a593Smuzhiyun 			break;
349*4882a593Smuzhiyun 		case 2: /* Dual channel */
350*4882a593Smuzhiyun 		default:
351*4882a593Smuzhiyun 			grain = 64; /* four-beat burst of 64 bytes */
352*4882a593Smuzhiyun 			break;
353*4882a593Smuzhiyun 		}
354*4882a593Smuzhiyun 		switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
355*4882a593Smuzhiyun 		case 6: /* 0110, no way to differentiate X8 VS X16 */
356*4882a593Smuzhiyun 		case 5:	/* 0101 */
357*4882a593Smuzhiyun 		case 8: /* 1000 */
358*4882a593Smuzhiyun 			dtype = DEV_X16;
359*4882a593Smuzhiyun 			break;
360*4882a593Smuzhiyun 		case 7: /* 0111 */
361*4882a593Smuzhiyun 		case 9: /* 1001 */
362*4882a593Smuzhiyun 			dtype = DEV_X8;
363*4882a593Smuzhiyun 			break;
364*4882a593Smuzhiyun 		default:
365*4882a593Smuzhiyun 			dtype = DEV_UNKNOWN;
366*4882a593Smuzhiyun 		break;
367*4882a593Smuzhiyun 		}
368*4882a593Smuzhiyun 		for (j = 0; j < csrow->nr_channels; j++) {
369*4882a593Smuzhiyun 			dimm = csrow->channels[j]->dimm;
370*4882a593Smuzhiyun 			dimm->nr_pages = nr_pages / csrow->nr_channels;
371*4882a593Smuzhiyun 			dimm->mtype = MEM_RDDR;
372*4882a593Smuzhiyun 			dimm->edac_mode = EDAC_SECDED;
373*4882a593Smuzhiyun 			dimm->grain = grain;
374*4882a593Smuzhiyun 			dimm->dtype = dtype;
375*4882a593Smuzhiyun 		}
376*4882a593Smuzhiyun 	}
377*4882a593Smuzhiyun }
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun /* Enable memory controller ECC detection */
cpc925_mc_init(struct mem_ctl_info * mci)380*4882a593Smuzhiyun static void cpc925_mc_init(struct mem_ctl_info *mci)
381*4882a593Smuzhiyun {
382*4882a593Smuzhiyun 	struct cpc925_mc_pdata *pdata = mci->pvt_info;
383*4882a593Smuzhiyun 	u32 apimask;
384*4882a593Smuzhiyun 	u32 mccr;
385*4882a593Smuzhiyun 
386*4882a593Smuzhiyun 	/* Enable various ECC error exceptions */
387*4882a593Smuzhiyun 	apimask = __raw_readl(pdata->vbase + REG_APIMASK_OFFSET);
388*4882a593Smuzhiyun 	if ((apimask & ECC_MASK_ENABLE) == 0) {
389*4882a593Smuzhiyun 		apimask |= ECC_MASK_ENABLE;
390*4882a593Smuzhiyun 		__raw_writel(apimask, pdata->vbase + REG_APIMASK_OFFSET);
391*4882a593Smuzhiyun 	}
392*4882a593Smuzhiyun 
393*4882a593Smuzhiyun 	/* Enable ECC detection */
394*4882a593Smuzhiyun 	mccr = __raw_readl(pdata->vbase + REG_MCCR_OFFSET);
395*4882a593Smuzhiyun 	if ((mccr & MCCR_ECC_EN) == 0) {
396*4882a593Smuzhiyun 		mccr |= MCCR_ECC_EN;
397*4882a593Smuzhiyun 		__raw_writel(mccr, pdata->vbase + REG_MCCR_OFFSET);
398*4882a593Smuzhiyun 	}
399*4882a593Smuzhiyun }
400*4882a593Smuzhiyun 
/*
 * Disable memory controller ECC detection -- intentionally a no-op.
 *
 * Clearing the ECC detection bits here would be harmless by itself,
 * but re-enabling them in cpc925_mc_init() on a later module reload
 * triggers a machine check exception.  Leaving them enabled is safe:
 * they default to 1 out of reset and via the boot loader.
 */
static void cpc925_mc_exit(struct mem_ctl_info *mci)
{
}
417*4882a593Smuzhiyun 
418*4882a593Smuzhiyun /*
419*4882a593Smuzhiyun  * Revert DDR column/row/bank addresses into page frame number and
420*4882a593Smuzhiyun  * offset in page.
421*4882a593Smuzhiyun  *
422*4882a593Smuzhiyun  * Suppose memory mode is 0x0111(128-bit mode, identical DIMM pairs),
423*4882a593Smuzhiyun  * physical address(PA) bits to column address(CA) bits mappings are:
424*4882a593Smuzhiyun  * CA	0   1   2   3   4   5   6   7   8   9   10
425*4882a593Smuzhiyun  * PA	59  58  57  56  55  54  53  52  51  50  49
426*4882a593Smuzhiyun  *
427*4882a593Smuzhiyun  * physical address(PA) bits to bank address(BA) bits mappings are:
428*4882a593Smuzhiyun  * BA	0   1
429*4882a593Smuzhiyun  * PA	43  44
430*4882a593Smuzhiyun  *
431*4882a593Smuzhiyun  * physical address(PA) bits to row address(RA) bits mappings are:
432*4882a593Smuzhiyun  * RA	0   1   2   3   4   5   6   7   8   9   10   11   12
433*4882a593Smuzhiyun  * PA	36  35  34  48  47  46  45  40  41  42  39   38   37
434*4882a593Smuzhiyun  */
static void cpc925_mc_get_pfn(struct mem_ctl_info *mci, u32 mear,
		unsigned long *pfn, unsigned long *offset, int *csrow)
{
	u32 bcnt, rank, col, bank, row;
	u32 c;
	unsigned long pa;
	int i;

	/* Decode MEAR: burst count, rank (csrow), column, bank and row
	 * of the failing access. */
	bcnt = (mear & MEAR_BCNT_MASK) >> MEAR_BCNT_SHIFT;
	rank = (mear & MEAR_RANK_MASK) >> MEAR_RANK_SHIFT;
	col = (mear & MEAR_COL_MASK) >> MEAR_COL_SHIFT;
	bank = (mear & MEAR_BANK_MASK) >> MEAR_BANK_SHIFT;
	row = mear & MEAR_ROW_MASK;

	*csrow = rank;

#ifdef CONFIG_EDAC_DEBUG
	/* first_page == 0 here means the csrow was never populated by
	 * cpc925_init_csrows(); *pfn/*offset are left untouched. */
	if (mci->csrows[rank]->first_page == 0) {
		cpc925_mc_printk(mci, KERN_ERR, "ECC occurs in a "
			"non-populated csrow, broken hardware?\n");
		return;
	}
#endif

	/* Revert csrow number: start from the row's base physical address */
	pa = mci->csrows[rank]->first_page << PAGE_SHIFT;

	/* Revert column address: CA bit i maps to PA bit (14 - i), per
	 * the CA/PA table in the comment above this function.  bcnt is
	 * folded into the low column bits first. */
	col += bcnt;
	for (i = 0; i < 11; i++) {
		c = col & 0x1;
		col >>= 1;
		pa |= c << (14 - i);
	}

	/* Revert bank address: BA0/BA1 land at PA bits 19..20 */
	pa |= bank << 19;

	/* Revert row address, in 4 steps -- each loop below places one
	 * contiguous run of RA bits at its (non-contiguous) PA position,
	 * following the RA/PA mapping documented above. */
	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (26 - i);
	}

	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (21 + i);
	}

	for (i = 0; i < 4; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (18 - i);
	}

	for (i = 0; i < 3; i++) {
		c = row & 0x1;
		row >>= 1;
		pa |= c << (29 - i);
	}

	/* Split the reconstructed physical address into PFN + page offset */
	*offset = pa & (PAGE_SIZE - 1);
	*pfn = pa >> PAGE_SHIFT;

	edac_dbg(0, "ECC physical address 0x%lx\n", pa);
}
503*4882a593Smuzhiyun 
/*
 * Map an ECC syndrome to the failing channel: a clear high-byte
 * syndrome means channel 0, a clear low byte means channel 1.
 * Anything else is unexpected; log it and default to channel 1.
 */
static int cpc925_mc_find_channel(struct mem_ctl_info *mci, u16 syndrome)
{
	if (!(syndrome & MESR_ECC_SYN_H_MASK))
		return 0;

	if (!(syndrome & MESR_ECC_SYN_L_MASK))
		return 1;

	cpc925_mc_printk(mci, KERN_INFO, "Unexpected syndrome value: 0x%x\n",
			 syndrome);
	return 1;
}
516*4882a593Smuzhiyun 
517*4882a593Smuzhiyun /* Check memory controller registers for ECC errors */
cpc925_mc_check(struct mem_ctl_info * mci)518*4882a593Smuzhiyun static void cpc925_mc_check(struct mem_ctl_info *mci)
519*4882a593Smuzhiyun {
520*4882a593Smuzhiyun 	struct cpc925_mc_pdata *pdata = mci->pvt_info;
521*4882a593Smuzhiyun 	u32 apiexcp;
522*4882a593Smuzhiyun 	u32 mear;
523*4882a593Smuzhiyun 	u32 mesr;
524*4882a593Smuzhiyun 	u16 syndrome;
525*4882a593Smuzhiyun 	unsigned long pfn = 0, offset = 0;
526*4882a593Smuzhiyun 	int csrow = 0, channel = 0;
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun 	/* APIEXCP is cleared when read */
529*4882a593Smuzhiyun 	apiexcp = __raw_readl(pdata->vbase + REG_APIEXCP_OFFSET);
530*4882a593Smuzhiyun 	if ((apiexcp & ECC_EXCP_DETECTED) == 0)
531*4882a593Smuzhiyun 		return;
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	mesr = __raw_readl(pdata->vbase + REG_MESR_OFFSET);
534*4882a593Smuzhiyun 	syndrome = mesr | (MESR_ECC_SYN_H_MASK | MESR_ECC_SYN_L_MASK);
535*4882a593Smuzhiyun 
536*4882a593Smuzhiyun 	mear = __raw_readl(pdata->vbase + REG_MEAR_OFFSET);
537*4882a593Smuzhiyun 
538*4882a593Smuzhiyun 	/* Revert column/row addresses into page frame number, etc */
539*4882a593Smuzhiyun 	cpc925_mc_get_pfn(mci, mear, &pfn, &offset, &csrow);
540*4882a593Smuzhiyun 
541*4882a593Smuzhiyun 	if (apiexcp & CECC_EXCP_DETECTED) {
542*4882a593Smuzhiyun 		cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
543*4882a593Smuzhiyun 		channel = cpc925_mc_find_channel(mci, syndrome);
544*4882a593Smuzhiyun 		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
545*4882a593Smuzhiyun 				     pfn, offset, syndrome,
546*4882a593Smuzhiyun 				     csrow, channel, -1,
547*4882a593Smuzhiyun 				     mci->ctl_name, "");
548*4882a593Smuzhiyun 	}
549*4882a593Smuzhiyun 
550*4882a593Smuzhiyun 	if (apiexcp & UECC_EXCP_DETECTED) {
551*4882a593Smuzhiyun 		cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
552*4882a593Smuzhiyun 		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
553*4882a593Smuzhiyun 				     pfn, offset, 0,
554*4882a593Smuzhiyun 				     csrow, -1, -1,
555*4882a593Smuzhiyun 				     mci->ctl_name, "");
556*4882a593Smuzhiyun 	}
557*4882a593Smuzhiyun 
558*4882a593Smuzhiyun 	cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
559*4882a593Smuzhiyun 	cpc925_mc_printk(mci, KERN_INFO, "APIMASK		0x%08x\n",
560*4882a593Smuzhiyun 		__raw_readl(pdata->vbase + REG_APIMASK_OFFSET));
561*4882a593Smuzhiyun 	cpc925_mc_printk(mci, KERN_INFO, "APIEXCP		0x%08x\n",
562*4882a593Smuzhiyun 		apiexcp);
563*4882a593Smuzhiyun 	cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Ctrl	0x%08x\n",
564*4882a593Smuzhiyun 		__raw_readl(pdata->vbase + REG_MSCR_OFFSET));
565*4882a593Smuzhiyun 	cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge Start	0x%08x\n",
566*4882a593Smuzhiyun 		__raw_readl(pdata->vbase + REG_MSRSR_OFFSET));
567*4882a593Smuzhiyun 	cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Rge End	0x%08x\n",
568*4882a593Smuzhiyun 		__raw_readl(pdata->vbase + REG_MSRER_OFFSET));
569*4882a593Smuzhiyun 	cpc925_mc_printk(mci, KERN_INFO, "Mem Scrub Pattern	0x%08x\n",
570*4882a593Smuzhiyun 		__raw_readl(pdata->vbase + REG_MSPR_OFFSET));
571*4882a593Smuzhiyun 	cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Ctrl		0x%08x\n",
572*4882a593Smuzhiyun 		__raw_readl(pdata->vbase + REG_MCCR_OFFSET));
573*4882a593Smuzhiyun 	cpc925_mc_printk(mci, KERN_INFO, "Mem Chk Rge End	0x%08x\n",
574*4882a593Smuzhiyun 		__raw_readl(pdata->vbase + REG_MCRER_OFFSET));
575*4882a593Smuzhiyun 	cpc925_mc_printk(mci, KERN_INFO, "Mem Err Address	0x%08x\n",
576*4882a593Smuzhiyun 		mesr);
577*4882a593Smuzhiyun 	cpc925_mc_printk(mci, KERN_INFO, "Mem Err Syndrome	0x%08x\n",
578*4882a593Smuzhiyun 		syndrome);
579*4882a593Smuzhiyun }
580*4882a593Smuzhiyun 
581*4882a593Smuzhiyun /******************** CPU err device********************************/
cpc925_cpu_mask_disabled(void)582*4882a593Smuzhiyun static u32 cpc925_cpu_mask_disabled(void)
583*4882a593Smuzhiyun {
584*4882a593Smuzhiyun 	struct device_node *cpunode;
585*4882a593Smuzhiyun 	static u32 mask = 0;
586*4882a593Smuzhiyun 
587*4882a593Smuzhiyun 	/* use cached value if available */
588*4882a593Smuzhiyun 	if (mask != 0)
589*4882a593Smuzhiyun 		return mask;
590*4882a593Smuzhiyun 
591*4882a593Smuzhiyun 	mask = APIMASK_ADI0 | APIMASK_ADI1;
592*4882a593Smuzhiyun 
593*4882a593Smuzhiyun 	for_each_of_cpu_node(cpunode) {
594*4882a593Smuzhiyun 		const u32 *reg = of_get_property(cpunode, "reg", NULL);
595*4882a593Smuzhiyun 		if (reg == NULL || *reg > 2) {
596*4882a593Smuzhiyun 			cpc925_printk(KERN_ERR, "Bad reg value at %pOF\n", cpunode);
597*4882a593Smuzhiyun 			continue;
598*4882a593Smuzhiyun 		}
599*4882a593Smuzhiyun 
600*4882a593Smuzhiyun 		mask &= ~APIMASK_ADI(*reg);
601*4882a593Smuzhiyun 	}
602*4882a593Smuzhiyun 
603*4882a593Smuzhiyun 	if (mask != (APIMASK_ADI0 | APIMASK_ADI1)) {
604*4882a593Smuzhiyun 		/* We assume that each CPU sits on it's own PI and that
605*4882a593Smuzhiyun 		 * for present CPUs the reg property equals to the PI
606*4882a593Smuzhiyun 		 * interface id */
607*4882a593Smuzhiyun 		cpc925_printk(KERN_WARNING,
608*4882a593Smuzhiyun 				"Assuming PI id is equal to CPU MPIC id!\n");
609*4882a593Smuzhiyun 	}
610*4882a593Smuzhiyun 
611*4882a593Smuzhiyun 	return mask;
612*4882a593Smuzhiyun }
613*4882a593Smuzhiyun 
614*4882a593Smuzhiyun /* Enable CPU Errors detection */
cpc925_cpu_init(struct cpc925_dev_info * dev_info)615*4882a593Smuzhiyun static void cpc925_cpu_init(struct cpc925_dev_info *dev_info)
616*4882a593Smuzhiyun {
617*4882a593Smuzhiyun 	u32 apimask;
618*4882a593Smuzhiyun 	u32 cpumask;
619*4882a593Smuzhiyun 
620*4882a593Smuzhiyun 	apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
621*4882a593Smuzhiyun 
622*4882a593Smuzhiyun 	cpumask = cpc925_cpu_mask_disabled();
623*4882a593Smuzhiyun 	if (apimask & cpumask) {
624*4882a593Smuzhiyun 		cpc925_printk(KERN_WARNING, "CPU(s) not present, "
625*4882a593Smuzhiyun 				"but enabled in APIMASK, disabling\n");
626*4882a593Smuzhiyun 		apimask &= ~cpumask;
627*4882a593Smuzhiyun 	}
628*4882a593Smuzhiyun 
629*4882a593Smuzhiyun 	if ((apimask & CPU_MASK_ENABLE) == 0)
630*4882a593Smuzhiyun 		apimask |= CPU_MASK_ENABLE;
631*4882a593Smuzhiyun 
632*4882a593Smuzhiyun 	__raw_writel(apimask, dev_info->vbase + REG_APIMASK_OFFSET);
633*4882a593Smuzhiyun }
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun /* Disable CPU Errors detection */
/* Disable CPU Errors detection */
static void cpc925_cpu_exit(struct cpc925_dev_info *dev_info)
{
	/*
	 * WARNING:
	 * We are supposed to clear the CPU error detection bits,
	 * and it will be no problem to do so. However, once they
	 * are cleared here if we want to re-install CPC925 EDAC
	 * module later, setting them up in cpc925_cpu_init() will
	 * trigger machine check exception.
	 * Also, it's ok to leave CPU error detection bits enabled,
	 * since they are reset to 1 by default.
	 *
	 * Intentionally empty: the trailing "return;" the original
	 * carried was redundant at the end of a void function.
	 */
}
651*4882a593Smuzhiyun 
652*4882a593Smuzhiyun /* Check for CPU Errors */
cpc925_cpu_check(struct edac_device_ctl_info * edac_dev)653*4882a593Smuzhiyun static void cpc925_cpu_check(struct edac_device_ctl_info *edac_dev)
654*4882a593Smuzhiyun {
655*4882a593Smuzhiyun 	struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
656*4882a593Smuzhiyun 	u32 apiexcp;
657*4882a593Smuzhiyun 	u32 apimask;
658*4882a593Smuzhiyun 
659*4882a593Smuzhiyun 	/* APIEXCP is cleared when read */
660*4882a593Smuzhiyun 	apiexcp = __raw_readl(dev_info->vbase + REG_APIEXCP_OFFSET);
661*4882a593Smuzhiyun 	if ((apiexcp & CPU_EXCP_DETECTED) == 0)
662*4882a593Smuzhiyun 		return;
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 	if ((apiexcp & ~cpc925_cpu_mask_disabled()) == 0)
665*4882a593Smuzhiyun 		return;
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun 	apimask = __raw_readl(dev_info->vbase + REG_APIMASK_OFFSET);
668*4882a593Smuzhiyun 	cpc925_printk(KERN_INFO, "Processor Interface Fault\n"
669*4882a593Smuzhiyun 				 "Processor Interface register dump:\n");
670*4882a593Smuzhiyun 	cpc925_printk(KERN_INFO, "APIMASK		0x%08x\n", apimask);
671*4882a593Smuzhiyun 	cpc925_printk(KERN_INFO, "APIEXCP		0x%08x\n", apiexcp);
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun 	edac_device_handle_ue(edac_dev, 0, 0, edac_dev->ctl_name);
674*4882a593Smuzhiyun }
675*4882a593Smuzhiyun 
676*4882a593Smuzhiyun /******************** HT Link err device****************************/
677*4882a593Smuzhiyun /* Enable HyperTransport Link Error detection */
cpc925_htlink_init(struct cpc925_dev_info * dev_info)678*4882a593Smuzhiyun static void cpc925_htlink_init(struct cpc925_dev_info *dev_info)
679*4882a593Smuzhiyun {
680*4882a593Smuzhiyun 	u32 ht_errctrl;
681*4882a593Smuzhiyun 
682*4882a593Smuzhiyun 	ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
683*4882a593Smuzhiyun 	if ((ht_errctrl & HT_ERRCTRL_ENABLE) == 0) {
684*4882a593Smuzhiyun 		ht_errctrl |= HT_ERRCTRL_ENABLE;
685*4882a593Smuzhiyun 		__raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
686*4882a593Smuzhiyun 	}
687*4882a593Smuzhiyun }
688*4882a593Smuzhiyun 
689*4882a593Smuzhiyun /* Disable HyperTransport Link Error detection */
cpc925_htlink_exit(struct cpc925_dev_info * dev_info)690*4882a593Smuzhiyun static void cpc925_htlink_exit(struct cpc925_dev_info *dev_info)
691*4882a593Smuzhiyun {
692*4882a593Smuzhiyun 	u32 ht_errctrl;
693*4882a593Smuzhiyun 
694*4882a593Smuzhiyun 	ht_errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
695*4882a593Smuzhiyun 	ht_errctrl &= ~HT_ERRCTRL_ENABLE;
696*4882a593Smuzhiyun 	__raw_writel(ht_errctrl, dev_info->vbase + REG_ERRCTRL_OFFSET);
697*4882a593Smuzhiyun }
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun /* Check for HyperTransport Link errors */
/* Check for HyperTransport Link errors */
static void cpc925_htlink_check(struct edac_device_ctl_info *edac_dev)
{
	struct cpc925_dev_info *dev_info = edac_dev->pvt_info;
	/* Snapshot all four HT error-related registers up front */
	u32 brgctrl = __raw_readl(dev_info->vbase + REG_BRGCTRL_OFFSET);
	u32 linkctrl = __raw_readl(dev_info->vbase + REG_LINKCTRL_OFFSET);
	u32 errctrl = __raw_readl(dev_info->vbase + REG_ERRCTRL_OFFSET);
	u32 linkerr = __raw_readl(dev_info->vbase + REG_LINKERR_OFFSET);

	/* Bail out early when none of the registers flags an error */
	if (!((brgctrl & BRGCTRL_DETSERR) ||
	      (linkctrl & HT_LINKCTRL_DETECTED) ||
	      (errctrl & HT_ERRCTRL_DETECTED) ||
	      (linkerr & HT_LINKERR_DETECTED)))
		return;

	/* Dump the snapshotted registers before clearing anything */
	cpc925_printk(KERN_INFO, "HT Link Fault\n"
				 "HT register dump:\n");
	cpc925_printk(KERN_INFO, "Bridge Ctrl			0x%08x\n",
		      brgctrl);
	cpc925_printk(KERN_INFO, "Link Config Ctrl		0x%08x\n",
		      linkctrl);
	cpc925_printk(KERN_INFO, "Error Enum and Ctrl		0x%08x\n",
		      errctrl);
	cpc925_printk(KERN_INFO, "Link Error			0x%08x\n",
		      linkerr);

	/* Clear by write 1 */
	if (brgctrl & BRGCTRL_DETSERR)
		__raw_writel(BRGCTRL_DETSERR,
				dev_info->vbase + REG_BRGCTRL_OFFSET);

	if (linkctrl & HT_LINKCTRL_DETECTED)
		__raw_writel(HT_LINKCTRL_DETECTED,
				dev_info->vbase + REG_LINKCTRL_OFFSET);

	/* Initiate Secondary Bus Reset to clear the chain failure */
	if (errctrl & ERRCTRL_CHN_FAL)
		__raw_writel(BRGCTRL_SECBUSRESET,
				dev_info->vbase + REG_BRGCTRL_OFFSET);

	if (errctrl & ERRCTRL_RSP_ERR)
		__raw_writel(ERRCTRL_RSP_ERR,
				dev_info->vbase + REG_ERRCTRL_OFFSET);

	if (linkerr & HT_LINKERR_DETECTED)
		__raw_writel(HT_LINKERR_DETECTED,
				dev_info->vbase + REG_LINKERR_OFFSET);

	/* Reported as a corrected error once the status bits are cleared */
	edac_device_handle_ce(edac_dev, 0, 0, edac_dev->ctl_name);
}
749*4882a593Smuzhiyun 
/*
 * Table of the two EDAC devices sharing the memory controller's MMIO:
 * the CPU interface and the HyperTransport link.  The runtime fields
 * (.vbase, .pdev, .edac_dev, .edac_idx) are filled in later by
 * cpc925_add_edac_devices(); the table is terminated by an empty
 * entry (NULL .init ends the iteration).
 */
static struct cpc925_dev_info cpc925_devs[] = {
	{
	.ctl_name = CPC925_CPU_ERR_DEV,
	.init = cpc925_cpu_init,
	.exit = cpc925_cpu_exit,
	.check = cpc925_cpu_check,
	},
	{
	.ctl_name = CPC925_HT_LINK_DEV,
	.init = cpc925_htlink_init,
	.exit = cpc925_htlink_exit,
	.check = cpc925_htlink_check,
	},
	{ }
};
765*4882a593Smuzhiyun 
766*4882a593Smuzhiyun /*
767*4882a593Smuzhiyun  * Add CPU Err detection and HyperTransport Link Err detection
768*4882a593Smuzhiyun  * as common "edac_device", they have no corresponding device
769*4882a593Smuzhiyun  * nodes in the Open Firmware DTB and we have to add platform
770*4882a593Smuzhiyun  * devices for them. Also, they will share the MMIO with that
771*4882a593Smuzhiyun  * of memory controller.
772*4882a593Smuzhiyun  */
/*
 * Add CPU Err detection and HyperTransport Link Err detection
 * as common "edac_device", they have no corresponding device
 * nodes in the Open Firmware DTB and we have to add platform
 * devices for them. Also, they will share the MMIO with that
 * of memory controller.
 *
 * A failure for one entry is logged and cleaned up locally; the
 * loop then continues with the next entry.
 */
static void cpc925_add_edac_devices(void __iomem *vbase)
{
	struct cpc925_dev_info *dev_info;

	if (!vbase) {
		cpc925_printk(KERN_ERR, "MMIO not established yet\n");
		return;
	}

	/* cpc925_devs[] is terminated by an entry with a NULL .init */
	for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
		dev_info->vbase = vbase;
		dev_info->pdev = platform_device_register_simple(
					dev_info->ctl_name, 0, NULL, 0);
		if (IS_ERR(dev_info->pdev)) {
			cpc925_printk(KERN_ERR,
				"Can't register platform device for %s\n",
				dev_info->ctl_name);
			continue;
		}

		/*
		 * Don't have to allocate private structure but
		 * make use of cpc925_devs[] instead.
		 */
		dev_info->edac_idx = edac_device_alloc_index();
		dev_info->edac_dev =
			edac_device_alloc_ctl_info(0, dev_info->ctl_name,
				1, NULL, 0, 0, NULL, 0, dev_info->edac_idx);
		if (!dev_info->edac_dev) {
			cpc925_printk(KERN_ERR, "No memory for edac device\n");
			goto err1;
		}

		dev_info->edac_dev->pvt_info = dev_info;
		dev_info->edac_dev->dev = &dev_info->pdev->dev;
		dev_info->edac_dev->ctl_name = dev_info->ctl_name;
		dev_info->edac_dev->mod_name = CPC925_EDAC_MOD_STR;
		dev_info->edac_dev->dev_name = dev_name(&dev_info->pdev->dev);

		if (edac_op_state == EDAC_OPSTATE_POLL)
			dev_info->edac_dev->edac_check = dev_info->check;

		/*
		 * ->init is known non-NULL here: it is the loop's end
		 * condition, so no need to re-test it before the call.
		 */
		dev_info->init(dev_info);

		if (edac_device_add_device(dev_info->edac_dev) > 0) {
			cpc925_printk(KERN_ERR,
				"Unable to add edac device for %s\n",
				dev_info->ctl_name);
			goto err2;
		}

		edac_dbg(0, "Successfully added edac device for %s\n",
			 dev_info->ctl_name);

		continue;

err2:
		if (dev_info->exit)
			dev_info->exit(dev_info);
		edac_device_free_ctl_info(dev_info->edac_dev);
err1:
		platform_device_unregister(dev_info->pdev);
	}
}
838*4882a593Smuzhiyun 
839*4882a593Smuzhiyun /*
840*4882a593Smuzhiyun  * Delete the common "edac_device" for CPU Err Detection
841*4882a593Smuzhiyun  * and HyperTransport Link Err Detection
842*4882a593Smuzhiyun  */
cpc925_del_edac_devices(void)843*4882a593Smuzhiyun static void cpc925_del_edac_devices(void)
844*4882a593Smuzhiyun {
845*4882a593Smuzhiyun 	struct cpc925_dev_info *dev_info;
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun 	for (dev_info = &cpc925_devs[0]; dev_info->init; dev_info++) {
848*4882a593Smuzhiyun 		if (dev_info->edac_dev) {
849*4882a593Smuzhiyun 			edac_device_del_device(dev_info->edac_dev->dev);
850*4882a593Smuzhiyun 			edac_device_free_ctl_info(dev_info->edac_dev);
851*4882a593Smuzhiyun 			platform_device_unregister(dev_info->pdev);
852*4882a593Smuzhiyun 		}
853*4882a593Smuzhiyun 
854*4882a593Smuzhiyun 		if (dev_info->exit)
855*4882a593Smuzhiyun 			dev_info->exit(dev_info);
856*4882a593Smuzhiyun 
857*4882a593Smuzhiyun 		edac_dbg(0, "Successfully deleted edac device for %s\n",
858*4882a593Smuzhiyun 			 dev_info->ctl_name);
859*4882a593Smuzhiyun 	}
860*4882a593Smuzhiyun }
861*4882a593Smuzhiyun 
862*4882a593Smuzhiyun /* Convert current back-ground scrub rate into byte/sec bandwidth */
cpc925_get_sdram_scrub_rate(struct mem_ctl_info * mci)863*4882a593Smuzhiyun static int cpc925_get_sdram_scrub_rate(struct mem_ctl_info *mci)
864*4882a593Smuzhiyun {
865*4882a593Smuzhiyun 	struct cpc925_mc_pdata *pdata = mci->pvt_info;
866*4882a593Smuzhiyun 	int bw;
867*4882a593Smuzhiyun 	u32 mscr;
868*4882a593Smuzhiyun 	u8 si;
869*4882a593Smuzhiyun 
870*4882a593Smuzhiyun 	mscr = __raw_readl(pdata->vbase + REG_MSCR_OFFSET);
871*4882a593Smuzhiyun 	si = (mscr & MSCR_SI_MASK) >> MSCR_SI_SHIFT;
872*4882a593Smuzhiyun 
873*4882a593Smuzhiyun 	edac_dbg(0, "Mem Scrub Ctrl Register 0x%x\n", mscr);
874*4882a593Smuzhiyun 
875*4882a593Smuzhiyun 	if (((mscr & MSCR_SCRUB_MOD_MASK) != MSCR_BACKGR_SCRUB) ||
876*4882a593Smuzhiyun 	    (si == 0)) {
877*4882a593Smuzhiyun 		cpc925_mc_printk(mci, KERN_INFO, "Scrub mode not enabled\n");
878*4882a593Smuzhiyun 		bw = 0;
879*4882a593Smuzhiyun 	} else
880*4882a593Smuzhiyun 		bw = CPC925_SCRUB_BLOCK_SIZE * 0xFA67 / si;
881*4882a593Smuzhiyun 
882*4882a593Smuzhiyun 	return bw;
883*4882a593Smuzhiyun }
884*4882a593Smuzhiyun 
885*4882a593Smuzhiyun /* Return 0 for single channel; 1 for dual channel */
/* Return 0 for single channel; 1 for dual channel */
static int cpc925_mc_get_channels(void __iomem *vbase)
{
	u32 mbcr = __raw_readl(vbase + REG_MBCR_OFFSET);
	int dual;

	/*
	 * Dual channel only when 128-bit wide physical bus
	 * and 128-bit configuration.
	 */
	dual = !(mbcr & MBCR_64BITCFG_MASK) && !(mbcr & MBCR_64BITBUS_MASK);

	edac_dbg(0, "%s channel\n", dual ? "Dual" : "Single");

	return dual;
}
905*4882a593Smuzhiyun 
cpc925_probe(struct platform_device * pdev)906*4882a593Smuzhiyun static int cpc925_probe(struct platform_device *pdev)
907*4882a593Smuzhiyun {
908*4882a593Smuzhiyun 	static int edac_mc_idx;
909*4882a593Smuzhiyun 	struct mem_ctl_info *mci;
910*4882a593Smuzhiyun 	struct edac_mc_layer layers[2];
911*4882a593Smuzhiyun 	void __iomem *vbase;
912*4882a593Smuzhiyun 	struct cpc925_mc_pdata *pdata;
913*4882a593Smuzhiyun 	struct resource *r;
914*4882a593Smuzhiyun 	int res = 0, nr_channels;
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 	edac_dbg(0, "%s platform device found!\n", pdev->name);
917*4882a593Smuzhiyun 
918*4882a593Smuzhiyun 	if (!devres_open_group(&pdev->dev, cpc925_probe, GFP_KERNEL)) {
919*4882a593Smuzhiyun 		res = -ENOMEM;
920*4882a593Smuzhiyun 		goto out;
921*4882a593Smuzhiyun 	}
922*4882a593Smuzhiyun 
923*4882a593Smuzhiyun 	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
924*4882a593Smuzhiyun 	if (!r) {
925*4882a593Smuzhiyun 		cpc925_printk(KERN_ERR, "Unable to get resource\n");
926*4882a593Smuzhiyun 		res = -ENOENT;
927*4882a593Smuzhiyun 		goto err1;
928*4882a593Smuzhiyun 	}
929*4882a593Smuzhiyun 
930*4882a593Smuzhiyun 	if (!devm_request_mem_region(&pdev->dev,
931*4882a593Smuzhiyun 				     r->start,
932*4882a593Smuzhiyun 				     resource_size(r),
933*4882a593Smuzhiyun 				     pdev->name)) {
934*4882a593Smuzhiyun 		cpc925_printk(KERN_ERR, "Unable to request mem region\n");
935*4882a593Smuzhiyun 		res = -EBUSY;
936*4882a593Smuzhiyun 		goto err1;
937*4882a593Smuzhiyun 	}
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun 	vbase = devm_ioremap(&pdev->dev, r->start, resource_size(r));
940*4882a593Smuzhiyun 	if (!vbase) {
941*4882a593Smuzhiyun 		cpc925_printk(KERN_ERR, "Unable to ioremap device\n");
942*4882a593Smuzhiyun 		res = -ENOMEM;
943*4882a593Smuzhiyun 		goto err2;
944*4882a593Smuzhiyun 	}
945*4882a593Smuzhiyun 
946*4882a593Smuzhiyun 	nr_channels = cpc925_mc_get_channels(vbase) + 1;
947*4882a593Smuzhiyun 
948*4882a593Smuzhiyun 	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
949*4882a593Smuzhiyun 	layers[0].size = CPC925_NR_CSROWS;
950*4882a593Smuzhiyun 	layers[0].is_virt_csrow = true;
951*4882a593Smuzhiyun 	layers[1].type = EDAC_MC_LAYER_CHANNEL;
952*4882a593Smuzhiyun 	layers[1].size = nr_channels;
953*4882a593Smuzhiyun 	layers[1].is_virt_csrow = false;
954*4882a593Smuzhiyun 	mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
955*4882a593Smuzhiyun 			    sizeof(struct cpc925_mc_pdata));
956*4882a593Smuzhiyun 	if (!mci) {
957*4882a593Smuzhiyun 		cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
958*4882a593Smuzhiyun 		res = -ENOMEM;
959*4882a593Smuzhiyun 		goto err2;
960*4882a593Smuzhiyun 	}
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	pdata = mci->pvt_info;
963*4882a593Smuzhiyun 	pdata->vbase = vbase;
964*4882a593Smuzhiyun 	pdata->edac_idx = edac_mc_idx++;
965*4882a593Smuzhiyun 	pdata->name = pdev->name;
966*4882a593Smuzhiyun 
967*4882a593Smuzhiyun 	mci->pdev = &pdev->dev;
968*4882a593Smuzhiyun 	platform_set_drvdata(pdev, mci);
969*4882a593Smuzhiyun 	mci->dev_name = dev_name(&pdev->dev);
970*4882a593Smuzhiyun 	mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR;
971*4882a593Smuzhiyun 	mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
972*4882a593Smuzhiyun 	mci->edac_cap = EDAC_FLAG_SECDED;
973*4882a593Smuzhiyun 	mci->mod_name = CPC925_EDAC_MOD_STR;
974*4882a593Smuzhiyun 	mci->ctl_name = pdev->name;
975*4882a593Smuzhiyun 
976*4882a593Smuzhiyun 	if (edac_op_state == EDAC_OPSTATE_POLL)
977*4882a593Smuzhiyun 		mci->edac_check = cpc925_mc_check;
978*4882a593Smuzhiyun 
979*4882a593Smuzhiyun 	mci->ctl_page_to_phys = NULL;
980*4882a593Smuzhiyun 	mci->scrub_mode = SCRUB_SW_SRC;
981*4882a593Smuzhiyun 	mci->set_sdram_scrub_rate = NULL;
982*4882a593Smuzhiyun 	mci->get_sdram_scrub_rate = cpc925_get_sdram_scrub_rate;
983*4882a593Smuzhiyun 
984*4882a593Smuzhiyun 	cpc925_init_csrows(mci);
985*4882a593Smuzhiyun 
986*4882a593Smuzhiyun 	/* Setup memory controller registers */
987*4882a593Smuzhiyun 	cpc925_mc_init(mci);
988*4882a593Smuzhiyun 
989*4882a593Smuzhiyun 	if (edac_mc_add_mc(mci) > 0) {
990*4882a593Smuzhiyun 		cpc925_mc_printk(mci, KERN_ERR, "Failed edac_mc_add_mc()\n");
991*4882a593Smuzhiyun 		goto err3;
992*4882a593Smuzhiyun 	}
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun 	cpc925_add_edac_devices(vbase);
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun 	/* get this far and it's successful */
997*4882a593Smuzhiyun 	edac_dbg(0, "success\n");
998*4882a593Smuzhiyun 
999*4882a593Smuzhiyun 	res = 0;
1000*4882a593Smuzhiyun 	goto out;
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun err3:
1003*4882a593Smuzhiyun 	cpc925_mc_exit(mci);
1004*4882a593Smuzhiyun 	edac_mc_free(mci);
1005*4882a593Smuzhiyun err2:
1006*4882a593Smuzhiyun 	devm_release_mem_region(&pdev->dev, r->start, resource_size(r));
1007*4882a593Smuzhiyun err1:
1008*4882a593Smuzhiyun 	devres_release_group(&pdev->dev, cpc925_probe);
1009*4882a593Smuzhiyun out:
1010*4882a593Smuzhiyun 	return res;
1011*4882a593Smuzhiyun }
1012*4882a593Smuzhiyun 
/*
 * Tear down everything cpc925_probe() set up.  The order below is
 * significant and must not be changed.
 */
static int cpc925_remove(struct platform_device *pdev)
{
	struct mem_ctl_info *mci = platform_get_drvdata(pdev);

	/*
	 * Delete common edac devices before edac mc, because
	 * the former share the MMIO of the latter.
	 */
	cpc925_del_edac_devices();
	cpc925_mc_exit(mci);

	/* Unregister the MC from the edac core, then free it */
	edac_mc_del_mc(&pdev->dev);
	edac_mc_free(mci);

	return 0;
}
1029*4882a593Smuzhiyun 
/* Platform driver glue; matched by the "cpc925_edac" device name */
static struct platform_driver cpc925_edac_driver = {
	.probe = cpc925_probe,
	.remove = cpc925_remove,
	.driver = {
		   .name = "cpc925_edac",
	}
};
1037*4882a593Smuzhiyun 
cpc925_edac_init(void)1038*4882a593Smuzhiyun static int __init cpc925_edac_init(void)
1039*4882a593Smuzhiyun {
1040*4882a593Smuzhiyun 	int ret = 0;
1041*4882a593Smuzhiyun 
1042*4882a593Smuzhiyun 	printk(KERN_INFO "IBM CPC925 EDAC driver " CPC925_EDAC_REVISION "\n");
1043*4882a593Smuzhiyun 	printk(KERN_INFO "\t(c) 2008 Wind River Systems, Inc\n");
1044*4882a593Smuzhiyun 
1045*4882a593Smuzhiyun 	/* Only support POLL mode so far */
1046*4882a593Smuzhiyun 	edac_op_state = EDAC_OPSTATE_POLL;
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun 	ret = platform_driver_register(&cpc925_edac_driver);
1049*4882a593Smuzhiyun 	if (ret) {
1050*4882a593Smuzhiyun 		printk(KERN_WARNING "Failed to register %s\n",
1051*4882a593Smuzhiyun 			CPC925_EDAC_MOD_STR);
1052*4882a593Smuzhiyun 	}
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 	return ret;
1055*4882a593Smuzhiyun }
1056*4882a593Smuzhiyun 
/* Module exit point: undo cpc925_edac_init()'s driver registration */
static void __exit cpc925_edac_exit(void)
{
	platform_driver_unregister(&cpc925_edac_driver);
}
1061*4882a593Smuzhiyun 
1062*4882a593Smuzhiyun module_init(cpc925_edac_init);
1063*4882a593Smuzhiyun module_exit(cpc925_edac_exit);
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1066*4882a593Smuzhiyun MODULE_AUTHOR("Cao Qingtao <qingtao.cao@windriver.com>");
1067*4882a593Smuzhiyun MODULE_DESCRIPTION("IBM CPC925 Bridge and MC EDAC kernel module");
1068