xref: /OK3568_Linux_fs/kernel/drivers/edac/e752x_edac.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Intel e752x Memory Controller kernel module
3*4882a593Smuzhiyun  * (C) 2004 Linux Networx (http://lnxi.com)
4*4882a593Smuzhiyun  * This file may be distributed under the terms of the
5*4882a593Smuzhiyun  * GNU General Public License.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Implement support for the e7520, E7525, e7320 and i3100 memory controllers.
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * Datasheets:
10*4882a593Smuzhiyun  *	https://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
11*4882a593Smuzhiyun  *	ftp://download.intel.com/design/intarch/datashts/31345803.pdf
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * Written by Tom Zimmerman
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  * Contributors:
16*4882a593Smuzhiyun  * 	Thayne Harbaugh at realmsys.com (?)
17*4882a593Smuzhiyun  * 	Wang Zhenyu at intel.com
18*4882a593Smuzhiyun  * 	Dave Jiang at mvista.com
19*4882a593Smuzhiyun  *
20*4882a593Smuzhiyun  */
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun #include <linux/module.h>
23*4882a593Smuzhiyun #include <linux/init.h>
24*4882a593Smuzhiyun #include <linux/pci.h>
25*4882a593Smuzhiyun #include <linux/pci_ids.h>
26*4882a593Smuzhiyun #include <linux/edac.h>
27*4882a593Smuzhiyun #include "edac_module.h"
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun #define EDAC_MOD_STR	"e752x_edac"
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun static int report_non_memory_errors;
32*4882a593Smuzhiyun static int force_function_unhide;
33*4882a593Smuzhiyun static int sysbus_parity = -1;
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun static struct edac_pci_ctl_info *e752x_pci;
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun #define e752x_printk(level, fmt, arg...) \
38*4882a593Smuzhiyun 	edac_printk(level, "e752x", fmt, ##arg)
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #define e752x_mc_printk(mci, level, fmt, arg...) \
41*4882a593Smuzhiyun 	edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg)
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun #ifndef PCI_DEVICE_ID_INTEL_7520_0
44*4882a593Smuzhiyun #define PCI_DEVICE_ID_INTEL_7520_0      0x3590
45*4882a593Smuzhiyun #endif				/* PCI_DEVICE_ID_INTEL_7520_0      */
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun #ifndef PCI_DEVICE_ID_INTEL_7520_1_ERR
48*4882a593Smuzhiyun #define PCI_DEVICE_ID_INTEL_7520_1_ERR  0x3591
49*4882a593Smuzhiyun #endif				/* PCI_DEVICE_ID_INTEL_7520_1_ERR  */
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun #ifndef PCI_DEVICE_ID_INTEL_7525_0
52*4882a593Smuzhiyun #define PCI_DEVICE_ID_INTEL_7525_0      0x359E
53*4882a593Smuzhiyun #endif				/* PCI_DEVICE_ID_INTEL_7525_0      */
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun #ifndef PCI_DEVICE_ID_INTEL_7525_1_ERR
56*4882a593Smuzhiyun #define PCI_DEVICE_ID_INTEL_7525_1_ERR  0x3593
57*4882a593Smuzhiyun #endif				/* PCI_DEVICE_ID_INTEL_7525_1_ERR  */
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun #ifndef PCI_DEVICE_ID_INTEL_7320_0
60*4882a593Smuzhiyun #define PCI_DEVICE_ID_INTEL_7320_0	0x3592
61*4882a593Smuzhiyun #endif				/* PCI_DEVICE_ID_INTEL_7320_0 */
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun #ifndef PCI_DEVICE_ID_INTEL_7320_1_ERR
64*4882a593Smuzhiyun #define PCI_DEVICE_ID_INTEL_7320_1_ERR	0x3593
65*4882a593Smuzhiyun #endif				/* PCI_DEVICE_ID_INTEL_7320_1_ERR */
66*4882a593Smuzhiyun 
67*4882a593Smuzhiyun #ifndef PCI_DEVICE_ID_INTEL_3100_0
68*4882a593Smuzhiyun #define PCI_DEVICE_ID_INTEL_3100_0	0x35B0
69*4882a593Smuzhiyun #endif				/* PCI_DEVICE_ID_INTEL_3100_0 */
70*4882a593Smuzhiyun 
71*4882a593Smuzhiyun #ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR
72*4882a593Smuzhiyun #define PCI_DEVICE_ID_INTEL_3100_1_ERR	0x35B1
73*4882a593Smuzhiyun #endif				/* PCI_DEVICE_ID_INTEL_3100_1_ERR */
74*4882a593Smuzhiyun 
75*4882a593Smuzhiyun #define E752X_NR_CSROWS		8	/* number of csrows */
76*4882a593Smuzhiyun 
77*4882a593Smuzhiyun /* E752X register addresses - device 0 function 0 */
78*4882a593Smuzhiyun #define E752X_MCHSCRB		0x52	/* Memory Scrub register (16b) */
79*4882a593Smuzhiyun 					/*
80*4882a593Smuzhiyun 					 * 6:5     Scrub Completion Count
81*4882a593Smuzhiyun 					 * 3:2     Scrub Rate (i3100 only)
82*4882a593Smuzhiyun 					 *      01=fast 10=normal
83*4882a593Smuzhiyun 					 * 1:0     Scrub Mode enable
84*4882a593Smuzhiyun 					 *      00=off 10=on
85*4882a593Smuzhiyun 					 */
86*4882a593Smuzhiyun #define E752X_DRB		0x60	/* DRAM row boundary register (8b) */
87*4882a593Smuzhiyun #define E752X_DRA		0x70	/* DRAM row attribute register (8b) */
88*4882a593Smuzhiyun 					/*
89*4882a593Smuzhiyun 					 * 31:30   Device width row 7
90*4882a593Smuzhiyun 					 *      01=x8 10=x4 11=x8 DDR2
91*4882a593Smuzhiyun 					 * 27:26   Device width row 6
92*4882a593Smuzhiyun 					 * 23:22   Device width row 5
93*4882a593Smuzhiyun 					 * 19:20   Device width row 4
94*4882a593Smuzhiyun 					 * 15:14   Device width row 3
95*4882a593Smuzhiyun 					 * 11:10   Device width row 2
96*4882a593Smuzhiyun 					 *  7:6    Device width row 1
97*4882a593Smuzhiyun 					 *  3:2    Device width row 0
98*4882a593Smuzhiyun 					 */
99*4882a593Smuzhiyun #define E752X_DRC		0x7C	/* DRAM controller mode reg (32b) */
100*4882a593Smuzhiyun 					/* FIXME:IS THIS RIGHT? */
101*4882a593Smuzhiyun 					/*
102*4882a593Smuzhiyun 					 * 22    Number channels 0=1,1=2
103*4882a593Smuzhiyun 					 * 19:18 DRB Granularity 32/64MB
104*4882a593Smuzhiyun 					 */
105*4882a593Smuzhiyun #define E752X_DRM		0x80	/* Dimm mapping register */
106*4882a593Smuzhiyun #define E752X_DDRCSR		0x9A	/* DDR control and status reg (16b) */
107*4882a593Smuzhiyun 					/*
108*4882a593Smuzhiyun 					 * 14:12 1 single A, 2 single B, 3 dual
109*4882a593Smuzhiyun 					 */
110*4882a593Smuzhiyun #define E752X_TOLM		0xC4	/* DRAM top of low memory reg (16b) */
111*4882a593Smuzhiyun #define E752X_REMAPBASE		0xC6	/* DRAM remap base address reg (16b) */
112*4882a593Smuzhiyun #define E752X_REMAPLIMIT	0xC8	/* DRAM remap limit address reg (16b) */
113*4882a593Smuzhiyun #define E752X_REMAPOFFSET	0xCA	/* DRAM remap limit offset reg (16b) */
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun /* E752X register addresses - device 0 function 1 */
116*4882a593Smuzhiyun #define E752X_FERR_GLOBAL	0x40	/* Global first error register (32b) */
117*4882a593Smuzhiyun #define E752X_NERR_GLOBAL	0x44	/* Global next error register (32b) */
118*4882a593Smuzhiyun #define E752X_HI_FERR		0x50	/* Hub interface first error reg (8b) */
119*4882a593Smuzhiyun #define E752X_HI_NERR		0x52	/* Hub interface next error reg (8b) */
120*4882a593Smuzhiyun #define E752X_HI_ERRMASK	0x54	/* Hub interface error mask reg (8b) */
121*4882a593Smuzhiyun #define E752X_HI_SMICMD		0x5A	/* Hub interface SMI command reg (8b) */
122*4882a593Smuzhiyun #define E752X_SYSBUS_FERR	0x60	/* System buss first error reg (16b) */
123*4882a593Smuzhiyun #define E752X_SYSBUS_NERR	0x62	/* System buss next error reg (16b) */
124*4882a593Smuzhiyun #define E752X_SYSBUS_ERRMASK	0x64	/* System buss error mask reg (16b) */
125*4882a593Smuzhiyun #define E752X_SYSBUS_SMICMD	0x6A	/* System buss SMI command reg (16b) */
126*4882a593Smuzhiyun #define E752X_BUF_FERR		0x70	/* Memory buffer first error reg (8b) */
127*4882a593Smuzhiyun #define E752X_BUF_NERR		0x72	/* Memory buffer next error reg (8b) */
128*4882a593Smuzhiyun #define E752X_BUF_ERRMASK	0x74	/* Memory buffer error mask reg (8b) */
129*4882a593Smuzhiyun #define E752X_BUF_SMICMD	0x7A	/* Memory buffer SMI cmd reg (8b) */
130*4882a593Smuzhiyun #define E752X_DRAM_FERR		0x80	/* DRAM first error register (16b) */
131*4882a593Smuzhiyun #define E752X_DRAM_NERR		0x82	/* DRAM next error register (16b) */
132*4882a593Smuzhiyun #define E752X_DRAM_ERRMASK	0x84	/* DRAM error mask register (8b) */
133*4882a593Smuzhiyun #define E752X_DRAM_SMICMD	0x8A	/* DRAM SMI command register (8b) */
134*4882a593Smuzhiyun #define E752X_DRAM_RETR_ADD	0xAC	/* DRAM Retry address register (32b) */
135*4882a593Smuzhiyun #define E752X_DRAM_SEC1_ADD	0xA0	/* DRAM first correctable memory */
136*4882a593Smuzhiyun 					/*     error address register (32b) */
137*4882a593Smuzhiyun 					/*
138*4882a593Smuzhiyun 					 * 31    Reserved
139*4882a593Smuzhiyun 					 * 30:2  CE address (64 byte block 34:6
140*4882a593Smuzhiyun 					 * 1     Reserved
141*4882a593Smuzhiyun 					 * 0     HiLoCS
142*4882a593Smuzhiyun 					 */
143*4882a593Smuzhiyun #define E752X_DRAM_SEC2_ADD	0xC8	/* DRAM first correctable memory */
144*4882a593Smuzhiyun 					/*     error address register (32b) */
145*4882a593Smuzhiyun 					/*
146*4882a593Smuzhiyun 					 * 31    Reserved
147*4882a593Smuzhiyun 					 * 30:2  CE address (64 byte block 34:6)
148*4882a593Smuzhiyun 					 * 1     Reserved
149*4882a593Smuzhiyun 					 * 0     HiLoCS
150*4882a593Smuzhiyun 					 */
151*4882a593Smuzhiyun #define E752X_DRAM_DED_ADD	0xA4	/* DRAM first uncorrectable memory */
152*4882a593Smuzhiyun 					/*     error address register (32b) */
153*4882a593Smuzhiyun 					/*
154*4882a593Smuzhiyun 					 * 31    Reserved
155*4882a593Smuzhiyun 					 * 30:2  CE address (64 byte block 34:6)
156*4882a593Smuzhiyun 					 * 1     Reserved
157*4882a593Smuzhiyun 					 * 0     HiLoCS
158*4882a593Smuzhiyun 					 */
159*4882a593Smuzhiyun #define E752X_DRAM_SCRB_ADD	0xA8	/* DRAM 1st uncorrectable scrub mem */
160*4882a593Smuzhiyun 					/*     error address register (32b) */
161*4882a593Smuzhiyun 					/*
162*4882a593Smuzhiyun 					 * 31    Reserved
163*4882a593Smuzhiyun 					 * 30:2  CE address (64 byte block 34:6
164*4882a593Smuzhiyun 					 * 1     Reserved
165*4882a593Smuzhiyun 					 * 0     HiLoCS
166*4882a593Smuzhiyun 					 */
167*4882a593Smuzhiyun #define E752X_DRAM_SEC1_SYNDROME 0xC4	/* DRAM first correctable memory */
168*4882a593Smuzhiyun 					/*     error syndrome register (16b) */
169*4882a593Smuzhiyun #define E752X_DRAM_SEC2_SYNDROME 0xC6	/* DRAM second correctable memory */
170*4882a593Smuzhiyun 					/*     error syndrome register (16b) */
171*4882a593Smuzhiyun #define E752X_DEVPRES1		0xF4	/* Device Present 1 register (8b) */
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun /* 3100 IMCH specific register addresses - device 0 function 1 */
174*4882a593Smuzhiyun #define I3100_NSI_FERR		0x48	/* NSI first error reg (32b) */
175*4882a593Smuzhiyun #define I3100_NSI_NERR		0x4C	/* NSI next error reg (32b) */
176*4882a593Smuzhiyun #define I3100_NSI_SMICMD	0x54	/* NSI SMI command register (32b) */
177*4882a593Smuzhiyun #define I3100_NSI_EMASK		0x90	/* NSI error mask register (32b) */
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun /* ICH5R register addresses - device 30 function 0 */
180*4882a593Smuzhiyun #define ICH5R_PCI_STAT		0x06	/* PCI status register (16b) */
181*4882a593Smuzhiyun #define ICH5R_PCI_2ND_STAT	0x1E	/* PCI status secondary reg (16b) */
182*4882a593Smuzhiyun #define ICH5R_PCI_BRIDGE_CTL	0x3E	/* PCI bridge control register (16b) */
183*4882a593Smuzhiyun 
/* Supported chipsets; values index the e752x_devs[] table below */
enum e752x_chips {
	E7520 = 0,
	E7525 = 1,
	E7320 = 2,
	I3100 = 3
};
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun /*
192*4882a593Smuzhiyun  * Those chips Support single-rank and dual-rank memories only.
193*4882a593Smuzhiyun  *
194*4882a593Smuzhiyun  * On e752x chips, the odd rows are present only on dual-rank memories.
195*4882a593Smuzhiyun  * Dividing the rank by two will provide the dimm#
196*4882a593Smuzhiyun  *
197*4882a593Smuzhiyun  * i3100 MC has a different mapping: it supports only 4 ranks.
198*4882a593Smuzhiyun  *
199*4882a593Smuzhiyun  * The mapping is (from 1 to n):
200*4882a593Smuzhiyun  *	slot	   single-ranked	double-ranked
201*4882a593Smuzhiyun  *	dimm #1 -> rank #4		NA
202*4882a593Smuzhiyun  *	dimm #2 -> rank #3		NA
203*4882a593Smuzhiyun  *	dimm #3 -> rank #2		Ranks 2 and 3
204*4882a593Smuzhiyun  *	dimm #4 -> rank $1		Ranks 1 and 4
205*4882a593Smuzhiyun  *
206*4882a593Smuzhiyun  * FIXME: The current mapping for i3100 considers that it supports up to 8
207*4882a593Smuzhiyun  *	  ranks/chanel, but datasheet says that the MC supports only 4 ranks.
208*4882a593Smuzhiyun  */
209*4882a593Smuzhiyun 
/* Driver-private state hung off mem_ctl_info->pvt_info */
struct e752x_pvt {
	struct pci_dev *dev_d0f0;	/* device 0 function 0 (MC control) */
	struct pci_dev *dev_d0f1;	/* device 0 function 1 (error registers) */
	u32 tolm;		/* top of low memory, in 4k-page units */
	u32 remapbase;		/* DRAM remap window base, in 4k-page units */
	u32 remaplimit;		/* DRAM remap window limit, in 4k-page units */
	int mc_symmetric;	/* non-zero: derive csrow from address bits
				 * instead of edac_mc_find_csrow_by_page() */
	u8 map[8];		/* csrow remap table used when mc_symmetric */
	int map_type;		/* NOTE(review): not used in this chunk -
				 * presumably selects the map[] flavour */
	const struct e752x_dev_info *dev_info;	/* chip IDs and name */
};
221*4882a593Smuzhiyun 
/* Per-chipset PCI device IDs and display name */
struct e752x_dev_info {
	u16 err_dev;		/* device 0 function 1 (error) PCI device ID */
	u16 ctl_dev;		/* device 0 function 0 (control) PCI device ID */
	const char *ctl_name;	/* human-readable chipset name */
};
227*4882a593Smuzhiyun 
/*
 * Raw values of the chipset error-reporting registers; field names
 * correspond to the E752X_*/I3100_* register defines above.
 */
struct e752x_error_info {
	u32 ferr_global;	/* E752X_FERR_GLOBAL */
	u32 nerr_global;	/* E752X_NERR_GLOBAL */
	u32 nsi_ferr;	/* 3100 only */
	u32 nsi_nerr;	/* 3100 only */
	u8 hi_ferr;	/* all but 3100 */
	u8 hi_nerr;	/* all but 3100 */
	u16 sysbus_ferr;
	u16 sysbus_nerr;
	u8 buf_ferr;
	u8 buf_nerr;
	u16 dram_ferr;
	u16 dram_nerr;
	u32 dram_sec1_add;	/* first correctable error address */
	u32 dram_sec2_add;	/* second correctable error address */
	u16 dram_sec1_syndrome;
	u16 dram_sec2_syndrome;
	u32 dram_ded_add;	/* first uncorrectable (read) error address */
	u32 dram_scrb_add;	/* first uncorrectable scrub error address */
	u32 dram_retr_add;	/* retry address */
};
249*4882a593Smuzhiyun 
/* Chipset ID/name table, indexed by enum e752x_chips */
static const struct e752x_dev_info e752x_devs[] = {
	[E7520] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7520_0,
		.ctl_name = "E7520"},
	[E7525] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7525_0,
		.ctl_name = "E7525"},
	[E7320] = {
		.err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_7320_0,
		.ctl_name = "E7320"},
	[I3100] = {
		.err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR,
		.ctl_dev = PCI_DEVICE_ID_INTEL_3100_0,
		.ctl_name = "3100"},
};
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun /* Valid scrub rates for the e752x/3100 hardware memory scrubber. We
270*4882a593Smuzhiyun  * map the scrubbing bandwidth to a hardware register value. The 'set'
271*4882a593Smuzhiyun  * operation finds the 'matching or higher value'.  Note that scrubbing
272*4882a593Smuzhiyun  * on the e752x can only be enabled/disabled.  The 3100 supports
273*4882a593Smuzhiyun  * a normal and fast mode.
274*4882a593Smuzhiyun  */
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun #define SDRATE_EOT 0xFFFFFFFF
277*4882a593Smuzhiyun 
/* One entry of a bandwidth -> scrub-register-value table (scrubrates_*[]) */
struct scrubrate {
	u32 bandwidth;	/* bandwidth consumed by scrubbing in bytes/sec */
	u16 scrubval;	/* register value for scrub rate */
};
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun /* Rate below assumes same performance as i3100 using PC3200 DDR2 in
284*4882a593Smuzhiyun  * normal mode.  e752x bridges don't support choosing normal or fast mode,
285*4882a593Smuzhiyun  * so the scrubbing bandwidth value isn't all that important - scrubbing is
286*4882a593Smuzhiyun  * either on or off.
287*4882a593Smuzhiyun  */
/* e752x scrub table: scrubbing is only on/off; terminated by SDRATE_EOT */
static const struct scrubrate scrubrates_e752x[] = {
	{0,		0x00},	/* Scrubbing Off */
	{500000,	0x02},	/* Scrubbing On */
	{SDRATE_EOT,	0x00}	/* End of Table */
};
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun /* Fast mode: 2 GByte PC3200 DDR2 scrubbed in 33s = 63161283 bytes/s
295*4882a593Smuzhiyun  * Normal mode: 125 (32000 / 256) times slower than fast mode.
296*4882a593Smuzhiyun  */
/* i3100 scrub table: off, normal and fast modes; terminated by SDRATE_EOT */
static const struct scrubrate scrubrates_i3100[] = {
	{0,		0x00},	/* Scrubbing Off */
	{500000,	0x0a},	/* Normal mode - 32k clocks */
	{62500000,	0x06},	/* Fast mode - 256 clocks */
	{SDRATE_EOT,	0x00}	/* End of Table */
};
303*4882a593Smuzhiyun 
ctl_page_to_phys(struct mem_ctl_info * mci,unsigned long page)304*4882a593Smuzhiyun static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci,
305*4882a593Smuzhiyun 				unsigned long page)
306*4882a593Smuzhiyun {
307*4882a593Smuzhiyun 	u32 remap;
308*4882a593Smuzhiyun 	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun 	edac_dbg(3, "\n");
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 	if (page < pvt->tolm)
313*4882a593Smuzhiyun 		return page;
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun 	if ((page >= 0x100000) && (page < pvt->remapbase))
316*4882a593Smuzhiyun 		return page;
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun 	remap = (page - pvt->tolm) + pvt->remapbase;
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun 	if (remap < pvt->remaplimit)
321*4882a593Smuzhiyun 		return remap;
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun 	e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page);
324*4882a593Smuzhiyun 	return pvt->tolm - 1;
325*4882a593Smuzhiyun }
326*4882a593Smuzhiyun 
/*
 * Decode and report a correctable (single-bit) DRAM error.
 *
 * @error_one:     DRAM error register value; bit 0 selects the channel.
 * @sec1_add:      E752X_DRAM_SEC1_ADD value - CE address in 64-byte-block
 *                 units (bits 34:6 of the DRAM linear address).
 * @sec1_syndrome: E752X_DRAM_SEC1_SYNDROME value.
 */
static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
			u32 sec1_add, u16 sec1_syndrome)
{
	u32 page;
	int row;
	int channel;
	int i;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	edac_dbg(3, "\n");

	/* convert the addr to 4k page */
	page = sec1_add >> (PAGE_SHIFT - 4);

	/* FIXME - check for -1 (edac_mc_find_csrow_by_page may fail) */
	if (pvt->mc_symmetric) {
		/* symmetric mode: chip select is address bits 14 & 13 */
		row = ((page >> 1) & 3);
		e752x_printk(KERN_WARNING,
			"Test row %d Table %d %d %d %d %d %d %d %d\n", row,
			pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3],
			pvt->map[4], pvt->map[5], pvt->map[6],
			pvt->map[7]);

		/* test for channel remapping: find row in the remap table */
		for (i = 0; i < 8; i++) {
			if (pvt->map[i] == row)
				break;
		}

		e752x_printk(KERN_WARNING, "Test computed row %d\n", i);

		if (i < 8)
			row = i;
		else
			e752x_mc_printk(mci, KERN_WARNING,
					"row %d not found in remap table\n",
					row);
	} else
		row = edac_mc_find_csrow_by_page(mci, page);

	/* 0 = channel A, 1 = channel B */
	channel = !(error_one & 1);

	/* e752x mc reads 34:6 of the DRAM linear address */
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
			     page, offset_in_page(sec1_add << 4), sec1_syndrome,
			     row, channel, -1,
			     "e752x CE", "");
}
377*4882a593Smuzhiyun 
/*
 * Note that a correctable error was seen; decode and report it only
 * when error handling is enabled.
 */
static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
			u32 sec1_add, u16 sec1_syndrome, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	do_process_ce(mci, error_one, sec1_add, sec1_syndrome);
}
387*4882a593Smuzhiyun 
/*
 * Decode and report uncorrectable (multi-bit) DRAM errors.  Two sources
 * may be flagged in @error_one: a failing read (address in @ded_add,
 * DRAM_DED_ADD) and a failing scrub (address in @scrb_add,
 * DRAM_SCRB_ADD); each is reported separately.  Addresses are in
 * 64-byte-block units (bits 34:6 of the DRAM linear address).
 */
static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
			u32 ded_add, u32 scrb_add)
{
	u32 error_2b, block_page;
	int row;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	edac_dbg(3, "\n");

	/* UE detected on a read (first/next error bits) */
	if (error_one & 0x0202) {
		error_2b = ded_add;

		/* convert to 4k address */
		block_page = error_2b >> (PAGE_SHIFT - 4);

		row = pvt->mc_symmetric ?
		/* chip select are bits 14 & 13 */
			((block_page >> 1) & 3) :
			edac_mc_find_csrow_by_page(mci, block_page);

		/* e752x mc reads 34:6 of the DRAM linear address */
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
					block_page,
					offset_in_page(error_2b << 4), 0,
					 row, -1, -1,
					"e752x UE from Read", "");

	}
	/* UE detected by the hardware scrubber (first/next error bits) */
	if (error_one & 0x0404) {
		error_2b = scrb_add;

		/* convert to 4k address */
		block_page = error_2b >> (PAGE_SHIFT - 4);

		row = pvt->mc_symmetric ?
		/* chip select are bits 14 & 13 */
			((block_page >> 1) & 3) :
			edac_mc_find_csrow_by_page(mci, block_page);

		/* e752x mc reads 34:6 of the DRAM linear address */
		edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
					block_page,
					offset_in_page(error_2b << 4), 0,
					row, -1, -1,
					"e752x UE from Scruber", "");
	}
}
435*4882a593Smuzhiyun 
/*
 * Note that an uncorrectable error was seen; decode and report it only
 * when error handling is enabled.
 */
static inline void process_ue(struct mem_ctl_info *mci, u16 error_one,
			u32 ded_add, u32 scrb_add, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	do_process_ue(mci, error_one, ded_add, scrb_add);
}
445*4882a593Smuzhiyun 
process_ue_no_info_wr(struct mem_ctl_info * mci,int * error_found,int handle_error)446*4882a593Smuzhiyun static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
447*4882a593Smuzhiyun 					 int *error_found, int handle_error)
448*4882a593Smuzhiyun {
449*4882a593Smuzhiyun 	*error_found = 1;
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun 	if (!handle_error)
452*4882a593Smuzhiyun 		return;
453*4882a593Smuzhiyun 
454*4882a593Smuzhiyun 	edac_dbg(3, "\n");
455*4882a593Smuzhiyun 	edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0,
456*4882a593Smuzhiyun 			     -1, -1, -1,
457*4882a593Smuzhiyun 			     "e752x UE log memory write", "");
458*4882a593Smuzhiyun }
459*4882a593Smuzhiyun 
/*
 * Report a memory read retry (correctable event).  @retry_add is the
 * E752X_DRAM_RETR_ADD value, in 64-byte-block units; only a warning is
 * logged - no EDAC error counters are updated here.
 */
static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
				 u32 retry_add)
{
	u32 error_1b, page;
	int row;
	struct e752x_pvt *pvt = (struct e752x_pvt *)mci->pvt_info;

	error_1b = retry_add;
	page = error_1b >> (PAGE_SHIFT - 4);  /* convert the addr to 4k page */

	/* chip select are bits 14 & 13 */
	row = pvt->mc_symmetric ? ((page >> 1) & 3) :
		edac_mc_find_csrow_by_page(mci, page);

	e752x_mc_printk(mci, KERN_WARNING,
			"CE page 0x%lx, row %d : Memory read retry\n",
			(long unsigned int)page, row);
}
478*4882a593Smuzhiyun 
process_ded_retry(struct mem_ctl_info * mci,u16 error,u32 retry_add,int * error_found,int handle_error)479*4882a593Smuzhiyun static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error,
480*4882a593Smuzhiyun 				u32 retry_add, int *error_found,
481*4882a593Smuzhiyun 				int handle_error)
482*4882a593Smuzhiyun {
483*4882a593Smuzhiyun 	*error_found = 1;
484*4882a593Smuzhiyun 
485*4882a593Smuzhiyun 	if (handle_error)
486*4882a593Smuzhiyun 		do_process_ded_retry(mci, error, retry_add);
487*4882a593Smuzhiyun }
488*4882a593Smuzhiyun 
process_threshold_ce(struct mem_ctl_info * mci,u16 error,int * error_found,int handle_error)489*4882a593Smuzhiyun static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error,
490*4882a593Smuzhiyun 					int *error_found, int handle_error)
491*4882a593Smuzhiyun {
492*4882a593Smuzhiyun 	*error_found = 1;
493*4882a593Smuzhiyun 
494*4882a593Smuzhiyun 	if (handle_error)
495*4882a593Smuzhiyun 		e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n");
496*4882a593Smuzhiyun }
497*4882a593Smuzhiyun 
/*
 * Error-source names for the global first/next error registers; index
 * matches the bit position in FERR_GLOBAL/NERR_GLOBAL.  The DRAM
 * controller entry (index DRAM_ENTRY) is the only one always reported.
 */
static char *global_message[11] = {
	"PCI Express C1",
	"PCI Express C",
	"PCI Express B1",
	"PCI Express B",
	"PCI Express A1",
	"PCI Express A",
	"DMA Controller",
	"HUB or NS Interface",
	"System Bus",
	"DRAM Controller",  /* 9th entry */
	"Internal Buffer"
};
511*4882a593Smuzhiyun 
512*4882a593Smuzhiyun #define DRAM_ENTRY	9
513*4882a593Smuzhiyun 
/* Severity prefix for error reports, indexed by the 'fatal' flag */
static char *fatal_message[2] = { "Non-Fatal ", "Fatal " };
515*4882a593Smuzhiyun 
/*
 * Decode a global first/next error register value and log every
 * asserted error source.  DRAM-controller errors are always reported;
 * other sources only when the report_non_memory_errors module
 * parameter is set.
 *
 * @fatal:  index into fatal_message[] (0 = non-fatal, 1 = fatal).
 * @errors: FERR_GLOBAL/NERR_GLOBAL register contents.
 */
static void do_global_error(int fatal, u32 errors)
{
	int i;

	/* iterate over the bits the message table describes */
	for (i = 0; i < ARRAY_SIZE(global_message); i++) {
		if (!(errors & (1 << i)))
			continue;

		/* If the error is from DRAM Controller OR
		 * we are to report ALL errors, then
		 * report the error
		 */
		if ((i == DRAM_ENTRY) || report_non_memory_errors)
			e752x_printk(KERN_WARNING, "%sError %s\n",
				fatal_message[fatal],
				global_message[i]);
	}
}
533*4882a593Smuzhiyun 
/*
 * Note that a global error was seen; decode and log it only when error
 * handling is enabled.
 */
static inline void global_error(int fatal, u32 errors, int *error_found,
				int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	do_global_error(fatal, errors);
}
542*4882a593Smuzhiyun 
/* Hub-interface error names; index matches the HI_FERR/HI_NERR bit */
static char *hub_message[7] = {
	"HI Address or Command Parity", "HI Illegal Access",
	"HI Internal Parity", "Out of Range Access",
	"HI Data Parity", "Enhanced Config Access",
	"Hub Interface Target Abort"
};
549*4882a593Smuzhiyun 
/*
 * Decode a hub-interface first/next error register value and log every
 * asserted error bit.
 *
 * @fatal:  index into fatal_message[] (0 = non-fatal, 1 = fatal).
 * @errors: HI_FERR/HI_NERR register contents.
 */
static void do_hub_error(int fatal, u8 errors)
{
	int i;

	/* iterate over the bits the message table describes */
	for (i = 0; i < ARRAY_SIZE(hub_message); i++) {
		if (errors & (1 << i))
			e752x_printk(KERN_WARNING, "%sError %s\n",
				fatal_message[fatal], hub_message[i]);
	}
}
560*4882a593Smuzhiyun 
/*
 * Note that a hub-interface error was seen; decode and log it only
 * when error handling is enabled.
 */
static inline void hub_error(int fatal, u8 errors, int *error_found,
			int handle_error)
{
	*error_found = 1;

	if (!handle_error)
		return;

	do_hub_error(fatal, errors);
}
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun #define NSI_FATAL_MASK		0x0c080081
571*4882a593Smuzhiyun #define NSI_NON_FATAL_MASK	0x23a0ba64
572*4882a593Smuzhiyun #define NSI_ERR_MASK		(NSI_FATAL_MASK | NSI_NON_FATAL_MASK)
573*4882a593Smuzhiyun 
/*
 * i3100 NSI error names; index matches the NSI_FERR/NSI_NERR bit.
 * Empty strings mark reserved bits (masked off by NSI_ERR_MASK).
 */
static char *nsi_message[30] = {
	"NSI Link Down",	/* NSI_FERR/NSI_NERR bit 0, fatal error */
	"",						/* reserved */
	"NSI Parity Error",				/* bit 2, non-fatal */
	"",						/* reserved */
	"",						/* reserved */
	"Correctable Error Message",			/* bit 5, non-fatal */
	"Non-Fatal Error Message",			/* bit 6, non-fatal */
	"Fatal Error Message",				/* bit 7, fatal */
	"",						/* reserved */
	"Receiver Error",				/* bit 9, non-fatal */
	"",						/* reserved */
	"Bad TLP",					/* bit 11, non-fatal */
	"Bad DLLP",					/* bit 12, non-fatal */
	"REPLAY_NUM Rollover",				/* bit 13, non-fatal */
	"",						/* reserved */
	"Replay Timer Timeout",				/* bit 15, non-fatal */
	"",						/* reserved */
	"",						/* reserved */
	"",						/* reserved */
	"Data Link Protocol Error",			/* bit 19, fatal */
	"",						/* reserved */
	"Poisoned TLP",					/* bit 21, non-fatal */
	"",						/* reserved */
	"Completion Timeout",				/* bit 23, non-fatal */
	"Completer Abort",				/* bit 24, non-fatal */
	"Unexpected Completion",			/* bit 25, non-fatal */
	"Receiver Overflow",				/* bit 26, fatal */
	"Malformed TLP",				/* bit 27, fatal */
	"",						/* reserved */
	"Unsupported Request"				/* bit 29, non-fatal */
};
606*4882a593Smuzhiyun 
/*
 * Decode an NSI (i3100) first/next error register value and log every
 * asserted error bit.  Reserved bits have empty message strings, but
 * callers are expected to mask them off with NSI_ERR_MASK anyway.
 *
 * @fatal:  index into fatal_message[] (0 = non-fatal, 1 = fatal).
 * @errors: NSI_FERR/NSI_NERR register contents.
 */
static void do_nsi_error(int fatal, u32 errors)
{
	int i;

	/* iterate over the bits the message table describes */
	for (i = 0; i < ARRAY_SIZE(nsi_message); i++) {
		if (errors & (1 << i))
			printk(KERN_WARNING "%sError %s\n",
			       fatal_message[fatal], nsi_message[i]);
	}
}
617*4882a593Smuzhiyun 
/* Record that an NSI error occurred; decode and print it only when the
 * caller asked for error handling.
 */
static inline void nsi_error(int fatal, u32 errors, int *error_found,
		int handle_error)
{
	*error_found = 1;
	if (!handle_error)
		return;
	do_nsi_error(fatal, errors);
}
626*4882a593Smuzhiyun 
/* Messages for the four memory-buffer (PMWB) parity error bits read
 * from E752X_BUF_FERR/E752X_BUF_NERR; the array index is the bit number.
 */
static char *membuf_message[4] = {
	"Internal PMWB to DRAM parity",
	"Internal PMWB to System Bus Parity",
	"Internal System Bus or IO to PMWB Parity",
	"Internal DRAM to PMWB Parity"
};
633*4882a593Smuzhiyun 
/* Emit a warning for every memory-buffer parity bit set in @errors
 * (all four conditions are non-fatal).
 */
static void do_membuf_error(u8 errors)
{
	int bit;

	for (bit = 0; bit < 4; bit++) {
		if (!((errors >> bit) & 1))
			continue;
		e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n",
			membuf_message[bit]);
	}
}
644*4882a593Smuzhiyun 
/* Latch that a memory-buffer error was seen; print the decode only if
 * handling is enabled.
 */
static inline void membuf_error(u8 errors, int *error_found, int handle_error)
{
	*error_found = 1;
	if (!handle_error)
		return;
	do_membuf_error(errors);
}
652*4882a593Smuzhiyun 
/* Messages for the ten system-bus (FSB) error bits reported through
 * E752X_SYSBUS_FERR/E752X_SYSBUS_NERR; the array index is the bit number.
 */
static char *sysbus_message[10] = {
	"Addr or Request Parity",			/* bit 0 */
	"Data Strobe Glitch",				/* bit 1 */
	"Addr Strobe Glitch",				/* bit 2 */
	"Data Parity",					/* bit 3 */
	"Addr Above TOM",				/* bit 4 */
	"Non DRAM Lock Error",				/* bit 5 */
	"MCERR", "BINIT",				/* bits 6-7 */
	"Memory Parity",				/* bit 8 */
	"IO Subsystem Parity"				/* bit 9 */
};
664*4882a593Smuzhiyun 
/* Emit one warning per set system-bus status bit; @fatal chooses the
 * severity prefix via fatal_message[].
 */
static void do_sysbus_error(int fatal, u32 errors)
{
	int bit;

	for (bit = 0; bit < 10; bit++) {
		if (!((errors >> bit) & 1))
			continue;
		e752x_printk(KERN_WARNING, "%sError System Bus %s\n",
			fatal_message[fatal], sysbus_message[bit]);
	}
}
675*4882a593Smuzhiyun 
/* Latch that a system-bus error was seen; decode it only when error
 * handling is enabled.
 */
static inline void sysbus_error(int fatal, u32 errors, int *error_found,
				int handle_error)
{
	*error_found = 1;
	if (!handle_error)
		return;
	do_sysbus_error(fatal, errors);
}
684*4882a593Smuzhiyun 
e752x_check_hub_interface(struct e752x_error_info * info,int * error_found,int handle_error)685*4882a593Smuzhiyun static void e752x_check_hub_interface(struct e752x_error_info *info,
686*4882a593Smuzhiyun 				int *error_found, int handle_error)
687*4882a593Smuzhiyun {
688*4882a593Smuzhiyun 	u8 stat8;
689*4882a593Smuzhiyun 
690*4882a593Smuzhiyun 	//pci_read_config_byte(dev,E752X_HI_FERR,&stat8);
691*4882a593Smuzhiyun 
692*4882a593Smuzhiyun 	stat8 = info->hi_ferr;
693*4882a593Smuzhiyun 
694*4882a593Smuzhiyun 	if (stat8 & 0x7f) {	/* Error, so process */
695*4882a593Smuzhiyun 		stat8 &= 0x7f;
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun 		if (stat8 & 0x2b)
698*4882a593Smuzhiyun 			hub_error(1, stat8 & 0x2b, error_found, handle_error);
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun 		if (stat8 & 0x54)
701*4882a593Smuzhiyun 			hub_error(0, stat8 & 0x54, error_found, handle_error);
702*4882a593Smuzhiyun 	}
703*4882a593Smuzhiyun 	//pci_read_config_byte(dev,E752X_HI_NERR,&stat8);
704*4882a593Smuzhiyun 
705*4882a593Smuzhiyun 	stat8 = info->hi_nerr;
706*4882a593Smuzhiyun 
707*4882a593Smuzhiyun 	if (stat8 & 0x7f) {	/* Error, so process */
708*4882a593Smuzhiyun 		stat8 &= 0x7f;
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun 		if (stat8 & 0x2b)
711*4882a593Smuzhiyun 			hub_error(1, stat8 & 0x2b, error_found, handle_error);
712*4882a593Smuzhiyun 
713*4882a593Smuzhiyun 		if (stat8 & 0x54)
714*4882a593Smuzhiyun 			hub_error(0, stat8 & 0x54, error_found, handle_error);
715*4882a593Smuzhiyun 	}
716*4882a593Smuzhiyun }
717*4882a593Smuzhiyun 
e752x_check_ns_interface(struct e752x_error_info * info,int * error_found,int handle_error)718*4882a593Smuzhiyun static void e752x_check_ns_interface(struct e752x_error_info *info,
719*4882a593Smuzhiyun 				int *error_found, int handle_error)
720*4882a593Smuzhiyun {
721*4882a593Smuzhiyun 	u32 stat32;
722*4882a593Smuzhiyun 
723*4882a593Smuzhiyun 	stat32 = info->nsi_ferr;
724*4882a593Smuzhiyun 	if (stat32 & NSI_ERR_MASK) { /* Error, so process */
725*4882a593Smuzhiyun 		if (stat32 & NSI_FATAL_MASK)	/* check for fatal errors */
726*4882a593Smuzhiyun 			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
727*4882a593Smuzhiyun 				  handle_error);
728*4882a593Smuzhiyun 		if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */
729*4882a593Smuzhiyun 			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
730*4882a593Smuzhiyun 				  handle_error);
731*4882a593Smuzhiyun 	}
732*4882a593Smuzhiyun 	stat32 = info->nsi_nerr;
733*4882a593Smuzhiyun 	if (stat32 & NSI_ERR_MASK) {
734*4882a593Smuzhiyun 		if (stat32 & NSI_FATAL_MASK)
735*4882a593Smuzhiyun 			nsi_error(1, stat32 & NSI_FATAL_MASK, error_found,
736*4882a593Smuzhiyun 				  handle_error);
737*4882a593Smuzhiyun 		if (stat32 & NSI_NON_FATAL_MASK)
738*4882a593Smuzhiyun 			nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found,
739*4882a593Smuzhiyun 				  handle_error);
740*4882a593Smuzhiyun 	}
741*4882a593Smuzhiyun }
742*4882a593Smuzhiyun 
e752x_check_sysbus(struct e752x_error_info * info,int * error_found,int handle_error)743*4882a593Smuzhiyun static void e752x_check_sysbus(struct e752x_error_info *info,
744*4882a593Smuzhiyun 			int *error_found, int handle_error)
745*4882a593Smuzhiyun {
746*4882a593Smuzhiyun 	u32 stat32, error32;
747*4882a593Smuzhiyun 
748*4882a593Smuzhiyun 	//pci_read_config_dword(dev,E752X_SYSBUS_FERR,&stat32);
749*4882a593Smuzhiyun 	stat32 = info->sysbus_ferr + (info->sysbus_nerr << 16);
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun 	if (stat32 == 0)
752*4882a593Smuzhiyun 		return;		/* no errors */
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun 	error32 = (stat32 >> 16) & 0x3ff;
755*4882a593Smuzhiyun 	stat32 = stat32 & 0x3ff;
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun 	if (stat32 & 0x087)
758*4882a593Smuzhiyun 		sysbus_error(1, stat32 & 0x087, error_found, handle_error);
759*4882a593Smuzhiyun 
760*4882a593Smuzhiyun 	if (stat32 & 0x378)
761*4882a593Smuzhiyun 		sysbus_error(0, stat32 & 0x378, error_found, handle_error);
762*4882a593Smuzhiyun 
763*4882a593Smuzhiyun 	if (error32 & 0x087)
764*4882a593Smuzhiyun 		sysbus_error(1, error32 & 0x087, error_found, handle_error);
765*4882a593Smuzhiyun 
766*4882a593Smuzhiyun 	if (error32 & 0x378)
767*4882a593Smuzhiyun 		sysbus_error(0, error32 & 0x378, error_found, handle_error);
768*4882a593Smuzhiyun }
769*4882a593Smuzhiyun 
e752x_check_membuf(struct e752x_error_info * info,int * error_found,int handle_error)770*4882a593Smuzhiyun static void e752x_check_membuf(struct e752x_error_info *info,
771*4882a593Smuzhiyun 			int *error_found, int handle_error)
772*4882a593Smuzhiyun {
773*4882a593Smuzhiyun 	u8 stat8;
774*4882a593Smuzhiyun 
775*4882a593Smuzhiyun 	stat8 = info->buf_ferr;
776*4882a593Smuzhiyun 
777*4882a593Smuzhiyun 	if (stat8 & 0x0f) {	/* Error, so process */
778*4882a593Smuzhiyun 		stat8 &= 0x0f;
779*4882a593Smuzhiyun 		membuf_error(stat8, error_found, handle_error);
780*4882a593Smuzhiyun 	}
781*4882a593Smuzhiyun 
782*4882a593Smuzhiyun 	stat8 = info->buf_nerr;
783*4882a593Smuzhiyun 
784*4882a593Smuzhiyun 	if (stat8 & 0x0f) {	/* Error, so process */
785*4882a593Smuzhiyun 		stat8 &= 0x0f;
786*4882a593Smuzhiyun 		membuf_error(stat8, error_found, handle_error);
787*4882a593Smuzhiyun 	}
788*4882a593Smuzhiyun }
789*4882a593Smuzhiyun 
/* Decode the DRAM first-error and next-error registers and hand each
 * flagged condition to the matching process_*() reporter.  Each mask
 * below tests the same flag in both bytes of the 16-bit status word.
 */
static void e752x_check_dram(struct mem_ctl_info *mci,
			struct e752x_error_info *info, int *error_found,
			int handle_error)
{
	u16 error_one, error_next;

	error_one = info->dram_ferr;	/* first error logged */
	error_next = info->dram_nerr;	/* next error logged */

	/* decode and report errors */
	if (error_one & 0x0101)	/* check first error correctable */
		process_ce(mci, error_one, info->dram_sec1_add,
			info->dram_sec1_syndrome, error_found, handle_error);

	if (error_next & 0x0101)	/* check next error correctable */
		process_ce(mci, error_next, info->dram_sec2_add,
			info->dram_sec2_syndrome, error_found, handle_error);

	/* uncorrectable write errors carry no address information */
	if (error_one & 0x4040)
		process_ue_no_info_wr(mci, error_found, handle_error);

	if (error_next & 0x4040)
		process_ue_no_info_wr(mci, error_found, handle_error);

	/* DED-retry events report through the retry address register */
	if (error_one & 0x2020)
		process_ded_retry(mci, error_one, info->dram_retr_add,
				error_found, handle_error);

	if (error_next & 0x2020)
		process_ded_retry(mci, error_next, info->dram_retr_add,
				error_found, handle_error);

	/* correctable-error threshold exceeded */
	if (error_one & 0x0808)
		process_threshold_ce(mci, error_one, error_found, handle_error);

	if (error_next & 0x0808)
		process_threshold_ce(mci, error_next, error_found,
				handle_error);

	/* uncorrectable errors with DED/scrub addresses */
	if (error_one & 0x0606)
		process_ue(mci, error_one, info->dram_ded_add,
			info->dram_scrb_add, error_found, handle_error);

	if (error_next & 0x0606)
		process_ue(mci, error_next, info->dram_ded_add,
			info->dram_scrb_add, error_found, handle_error);
}
837*4882a593Smuzhiyun 
/* Snapshot every error-status register from the D0:F1 error device into
 * *info, then write each logged value back to its register (presumably
 * to clear the latched status -- TODO confirm W1C semantics against the
 * datasheet).  The i3100 reports through the NSI registers instead of
 * the hub-interface (HI) ones, selected by the err_dev device ID.
 */
static void e752x_get_error_info(struct mem_ctl_info *mci,
				 struct e752x_error_info *info)
{
	struct pci_dev *dev;
	struct e752x_pvt *pvt;

	memset(info, 0, sizeof(*info));
	pvt = (struct e752x_pvt *)mci->pvt_info;
	dev = pvt->dev_d0f1;
	pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global);

	/* first (FERR) bank: only read the detail registers when the
	 * global first-error register shows something happened */
	if (info->ferr_global) {
		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
			pci_read_config_dword(dev, I3100_NSI_FERR,
					     &info->nsi_ferr);
			info->hi_ferr = 0;
		} else {
			pci_read_config_byte(dev, E752X_HI_FERR,
					     &info->hi_ferr);
			info->nsi_ferr = 0;
		}
		pci_read_config_word(dev, E752X_SYSBUS_FERR,
				&info->sysbus_ferr);
		pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr);
		pci_read_config_word(dev, E752X_DRAM_FERR, &info->dram_ferr);
		pci_read_config_dword(dev, E752X_DRAM_SEC1_ADD,
				&info->dram_sec1_add);
		pci_read_config_word(dev, E752X_DRAM_SEC1_SYNDROME,
				&info->dram_sec1_syndrome);
		pci_read_config_dword(dev, E752X_DRAM_DED_ADD,
				&info->dram_ded_add);
		pci_read_config_dword(dev, E752X_DRAM_SCRB_ADD,
				&info->dram_scrb_add);
		pci_read_config_dword(dev, E752X_DRAM_RETR_ADD,
				&info->dram_retr_add);

		/* ignore the reserved bits just in case */
		if (info->hi_ferr & 0x7f)
			pci_write_config_byte(dev, E752X_HI_FERR,
					info->hi_ferr);

		if (info->nsi_ferr & NSI_ERR_MASK)
			pci_write_config_dword(dev, I3100_NSI_FERR,
					info->nsi_ferr);

		if (info->sysbus_ferr)
			pci_write_config_word(dev, E752X_SYSBUS_FERR,
					info->sysbus_ferr);

		if (info->buf_ferr & 0x0f)
			pci_write_config_byte(dev, E752X_BUF_FERR,
					info->buf_ferr);

		/* DRAM status uses a read-modify-write helper that only
		 * touches the bits that were set */
		if (info->dram_ferr)
			pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_FERR,
					 info->dram_ferr, info->dram_ferr);

		pci_write_config_dword(dev, E752X_FERR_GLOBAL,
				info->ferr_global);
	}

	pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global);

	/* second (NERR) bank: same pattern for the next-error registers */
	if (info->nerr_global) {
		if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
			pci_read_config_dword(dev, I3100_NSI_NERR,
					     &info->nsi_nerr);
			info->hi_nerr = 0;
		} else {
			pci_read_config_byte(dev, E752X_HI_NERR,
					     &info->hi_nerr);
			info->nsi_nerr = 0;
		}
		pci_read_config_word(dev, E752X_SYSBUS_NERR,
				&info->sysbus_nerr);
		pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr);
		pci_read_config_word(dev, E752X_DRAM_NERR, &info->dram_nerr);
		pci_read_config_dword(dev, E752X_DRAM_SEC2_ADD,
				&info->dram_sec2_add);
		pci_read_config_word(dev, E752X_DRAM_SEC2_SYNDROME,
				&info->dram_sec2_syndrome);

		if (info->hi_nerr & 0x7f)
			pci_write_config_byte(dev, E752X_HI_NERR,
					info->hi_nerr);

		if (info->nsi_nerr & NSI_ERR_MASK)
			pci_write_config_dword(dev, I3100_NSI_NERR,
					info->nsi_nerr);

		if (info->sysbus_nerr)
			pci_write_config_word(dev, E752X_SYSBUS_NERR,
					info->sysbus_nerr);

		if (info->buf_nerr & 0x0f)
			pci_write_config_byte(dev, E752X_BUF_NERR,
					info->buf_nerr);

		if (info->dram_nerr)
			pci_write_bits16(pvt->dev_d0f1, E752X_DRAM_NERR,
					 info->dram_nerr, info->dram_nerr);

		pci_write_config_dword(dev, E752X_NERR_GLOBAL,
				info->nerr_global);
	}
}
944*4882a593Smuzhiyun 
e752x_process_error_info(struct mem_ctl_info * mci,struct e752x_error_info * info,int handle_errors)945*4882a593Smuzhiyun static int e752x_process_error_info(struct mem_ctl_info *mci,
946*4882a593Smuzhiyun 				struct e752x_error_info *info,
947*4882a593Smuzhiyun 				int handle_errors)
948*4882a593Smuzhiyun {
949*4882a593Smuzhiyun 	u32 error32, stat32;
950*4882a593Smuzhiyun 	int error_found;
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 	error_found = 0;
953*4882a593Smuzhiyun 	error32 = (info->ferr_global >> 18) & 0x3ff;
954*4882a593Smuzhiyun 	stat32 = (info->ferr_global >> 4) & 0x7ff;
955*4882a593Smuzhiyun 
956*4882a593Smuzhiyun 	if (error32)
957*4882a593Smuzhiyun 		global_error(1, error32, &error_found, handle_errors);
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun 	if (stat32)
960*4882a593Smuzhiyun 		global_error(0, stat32, &error_found, handle_errors);
961*4882a593Smuzhiyun 
962*4882a593Smuzhiyun 	error32 = (info->nerr_global >> 18) & 0x3ff;
963*4882a593Smuzhiyun 	stat32 = (info->nerr_global >> 4) & 0x7ff;
964*4882a593Smuzhiyun 
965*4882a593Smuzhiyun 	if (error32)
966*4882a593Smuzhiyun 		global_error(1, error32, &error_found, handle_errors);
967*4882a593Smuzhiyun 
968*4882a593Smuzhiyun 	if (stat32)
969*4882a593Smuzhiyun 		global_error(0, stat32, &error_found, handle_errors);
970*4882a593Smuzhiyun 
971*4882a593Smuzhiyun 	e752x_check_hub_interface(info, &error_found, handle_errors);
972*4882a593Smuzhiyun 	e752x_check_ns_interface(info, &error_found, handle_errors);
973*4882a593Smuzhiyun 	e752x_check_sysbus(info, &error_found, handle_errors);
974*4882a593Smuzhiyun 	e752x_check_membuf(info, &error_found, handle_errors);
975*4882a593Smuzhiyun 	e752x_check_dram(mci, info, &error_found, handle_errors);
976*4882a593Smuzhiyun 	return error_found;
977*4882a593Smuzhiyun }
978*4882a593Smuzhiyun 
/* Polling entry point called by the EDAC core: capture the current
 * error-register state and decode/report anything found.
 */
static void e752x_check(struct mem_ctl_info *mci)
{
	struct e752x_error_info info;

	edac_dbg(3, "\n");
	e752x_get_error_info(mci, &info);
	e752x_process_error_info(mci, &info, 1);
}
987*4882a593Smuzhiyun 
988*4882a593Smuzhiyun /* Program byte/sec bandwidth scrub rate to hardware */
set_sdram_scrub_rate(struct mem_ctl_info * mci,u32 new_bw)989*4882a593Smuzhiyun static int set_sdram_scrub_rate(struct mem_ctl_info *mci, u32 new_bw)
990*4882a593Smuzhiyun {
991*4882a593Smuzhiyun 	const struct scrubrate *scrubrates;
992*4882a593Smuzhiyun 	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
993*4882a593Smuzhiyun 	struct pci_dev *pdev = pvt->dev_d0f0;
994*4882a593Smuzhiyun 	int i;
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun 	if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
997*4882a593Smuzhiyun 		scrubrates = scrubrates_i3100;
998*4882a593Smuzhiyun 	else
999*4882a593Smuzhiyun 		scrubrates = scrubrates_e752x;
1000*4882a593Smuzhiyun 
1001*4882a593Smuzhiyun 	/* Translate the desired scrub rate to a e752x/3100 register value.
1002*4882a593Smuzhiyun 	 * Search for the bandwidth that is equal or greater than the
1003*4882a593Smuzhiyun 	 * desired rate and program the cooresponding register value.
1004*4882a593Smuzhiyun 	 */
1005*4882a593Smuzhiyun 	for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
1006*4882a593Smuzhiyun 		if (scrubrates[i].bandwidth >= new_bw)
1007*4882a593Smuzhiyun 			break;
1008*4882a593Smuzhiyun 
1009*4882a593Smuzhiyun 	if (scrubrates[i].bandwidth == SDRATE_EOT)
1010*4882a593Smuzhiyun 		return -1;
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun 	pci_write_config_word(pdev, E752X_MCHSCRB, scrubrates[i].scrubval);
1013*4882a593Smuzhiyun 
1014*4882a593Smuzhiyun 	return scrubrates[i].bandwidth;
1015*4882a593Smuzhiyun }
1016*4882a593Smuzhiyun 
1017*4882a593Smuzhiyun /* Convert current scrub rate value into byte/sec bandwidth */
get_sdram_scrub_rate(struct mem_ctl_info * mci)1018*4882a593Smuzhiyun static int get_sdram_scrub_rate(struct mem_ctl_info *mci)
1019*4882a593Smuzhiyun {
1020*4882a593Smuzhiyun 	const struct scrubrate *scrubrates;
1021*4882a593Smuzhiyun 	struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info;
1022*4882a593Smuzhiyun 	struct pci_dev *pdev = pvt->dev_d0f0;
1023*4882a593Smuzhiyun 	u16 scrubval;
1024*4882a593Smuzhiyun 	int i;
1025*4882a593Smuzhiyun 
1026*4882a593Smuzhiyun 	if (pvt->dev_info->ctl_dev == PCI_DEVICE_ID_INTEL_3100_0)
1027*4882a593Smuzhiyun 		scrubrates = scrubrates_i3100;
1028*4882a593Smuzhiyun 	else
1029*4882a593Smuzhiyun 		scrubrates = scrubrates_e752x;
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun 	/* Find the bandwidth matching the memory scrubber configuration */
1032*4882a593Smuzhiyun 	pci_read_config_word(pdev, E752X_MCHSCRB, &scrubval);
1033*4882a593Smuzhiyun 	scrubval = scrubval & 0x0f;
1034*4882a593Smuzhiyun 
1035*4882a593Smuzhiyun 	for (i = 0; scrubrates[i].bandwidth != SDRATE_EOT; i++)
1036*4882a593Smuzhiyun 		if (scrubrates[i].scrubval == scrubval)
1037*4882a593Smuzhiyun 			break;
1038*4882a593Smuzhiyun 
1039*4882a593Smuzhiyun 	if (scrubrates[i].bandwidth == SDRATE_EOT) {
1040*4882a593Smuzhiyun 		e752x_printk(KERN_WARNING,
1041*4882a593Smuzhiyun 			"Invalid sdram scrub control value: 0x%x\n", scrubval);
1042*4882a593Smuzhiyun 		return -1;
1043*4882a593Smuzhiyun 	}
1044*4882a593Smuzhiyun 	return scrubrates[i].bandwidth;
1045*4882a593Smuzhiyun 
1046*4882a593Smuzhiyun }
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun /* Return 1 if dual channel mode is active.  Else return 0. */
dual_channel_active(u16 ddrcsr)1049*4882a593Smuzhiyun static inline int dual_channel_active(u16 ddrcsr)
1050*4882a593Smuzhiyun {
1051*4882a593Smuzhiyun 	return (((ddrcsr >> 12) & 3) == 3);
1052*4882a593Smuzhiyun }
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun /* Remap csrow index numbers if map_type is "reverse"
1055*4882a593Smuzhiyun  */
remap_csrow_index(struct mem_ctl_info * mci,int index)1056*4882a593Smuzhiyun static inline int remap_csrow_index(struct mem_ctl_info *mci, int index)
1057*4882a593Smuzhiyun {
1058*4882a593Smuzhiyun 	struct e752x_pvt *pvt = mci->pvt_info;
1059*4882a593Smuzhiyun 
1060*4882a593Smuzhiyun 	if (!pvt->map_type)
1061*4882a593Smuzhiyun 		return (7 - index);
1062*4882a593Smuzhiyun 
1063*4882a593Smuzhiyun 	return (index);
1064*4882a593Smuzhiyun }
1065*4882a593Smuzhiyun 
/* Populate the EDAC csrow/dimm tables from the chipset's DRAM row
 * attribute (DRA), DRAM control (DRC) and row boundary (DRB) registers.
 */
static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
			u16 ddrcsr)
{
	struct csrow_info *csrow;
	enum edac_type edac_mode;
	unsigned long last_cumul_size;
	int index, mem_dev, drc_chan;
	int drc_drbg;		/* DRB granularity 0=64mb, 1=128mb */
	int drc_ddim;		/* DRAM Data Integrity Mode 0=none, 2=edac */
	u8 value;
	u32 dra, drc, cumul_size, i, nr_pages;

	/* assemble the four DRA bytes into one 32-bit word */
	dra = 0;
	for (index = 0; index < 4; index++) {
		u8 dra_reg;
		pci_read_config_byte(pdev, E752X_DRA + index, &dra_reg);
		dra |= dra_reg << (index * 8);
	}
	pci_read_config_dword(pdev, E752X_DRC, &drc);
	drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
	drc_drbg = drc_chan + 1;	/* 128 in dual mode, 64 in single */
	drc_ddim = (drc >> 20) & 0x3;

	/* The dram row boundary (DRB) reg values are boundary address for
	 * each DRAM row with a granularity of 64 or 128MB (single/dual
	 * channel operation).  DRB regs are cumulative; therefore DRB7 will
	 * contain the total memory contained in all eight rows.
	 */
	for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) {
		/* mem_dev 0=x8, 1=x4 */
		mem_dev = (dra >> (index * 4 + 2)) & 0x3;
		csrow = mci->csrows[remap_csrow_index(mci, index)];

		mem_dev = (mem_dev == 2);
		pci_read_config_byte(pdev, E752X_DRB + index, &value);
		/* convert a 128 or 64 MiB DRB to a page size. */
		cumul_size = value << (25 + drc_drbg - PAGE_SHIFT);
		edac_dbg(3, "(%d) cumul_size 0x%x\n", index, cumul_size);
		/* an unchanged boundary means this row holds no memory */
		if (cumul_size == last_cumul_size)
			continue;	/* not populated */

		csrow->first_page = last_cumul_size;
		csrow->last_page = cumul_size - 1;
		nr_pages = cumul_size - last_cumul_size;
		last_cumul_size = cumul_size;

		/*
		* if single channel or x8 devices then SECDED
		* if dual channel and x4 then S4ECD4ED
		*/
		if (drc_ddim) {
			if (drc_chan && mem_dev) {
				edac_mode = EDAC_S4ECD4ED;
				mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
			} else {
				edac_mode = EDAC_SECDED;
				mci->edac_cap |= EDAC_FLAG_SECDED;
			}
		} else
			edac_mode = EDAC_NONE;
		/* split the row's pages evenly across its channels */
		for (i = 0; i < csrow->nr_channels; i++) {
			struct dimm_info *dimm = csrow->channels[i]->dimm;

			edac_dbg(3, "Initializing rank at (%i,%i)\n", index, i);
			dimm->nr_pages = nr_pages / csrow->nr_channels;
			dimm->grain = 1 << 12;	/* 4KiB - resolution of CELOG */
			dimm->mtype = MEM_RDDR;	/* only one type supported */
			dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
			dimm->edac_mode = edac_mode;
		}
	}
}
1138*4882a593Smuzhiyun 
/* Build pvt->map[], translating each of the eight DRB slots to a row
 * number: 0xff marks an empty slot (its boundary equals the previous
 * one), otherwise the slot records the next row index.  Slots are
 * scanned in pairs because a double-sided dimm occupies two rows.
 */
static void e752x_init_mem_map_table(struct pci_dev *pdev,
				struct e752x_pvt *pvt)
{
	int index;
	u8 value, last, row;

	last = 0;
	row = 0;

	for (index = 0; index < 8; index += 2) {
		pci_read_config_byte(pdev, E752X_DRB + index, &value);
		/* test if there is a dimm in this slot */
		if (value == last) {
			/* no dimm in the slot, so flag it as empty */
			pvt->map[index] = 0xff;
			pvt->map[index + 1] = 0xff;
		} else {	/* there is a dimm in the slot */
			pvt->map[index] = row;
			row++;
			last = value;
			/* test the next value to see if the dimm is double
			 * sided
			 */
			pci_read_config_byte(pdev, E752X_DRB + index + 1,
					&value);

			/* the dimm is single sided, so flag as empty */
			/* this is a double sided dimm to save the next row #*/
			pvt->map[index + 1] = (value == last) ? 0xff :	row;
			/* NOTE(review): row advances even when the second
			 * side is empty -- looks intentional to keep row
			 * numbers aligned with slot pairs; confirm against
			 * the chipset's row numbering.
			 */
			row++;
			last = value;
		}
	}
}
1173*4882a593Smuzhiyun 
/* Return 0 on success or 1 on failure. */
/* Look up and reference the two PCI functions the driver needs:
 * dev 0 func 1 (the error-reporting device, which a broken BIOS may
 * hide -- hence the pci_scan_single_device() fallback) and dev 0
 * func 0 (the memory controller itself).  On failure the d0f1
 * reference is dropped again.
 */
static int e752x_get_devs(struct pci_dev *pdev, int dev_idx,
			struct e752x_pvt *pvt)
{
	pvt->dev_d0f1 = pci_get_device(PCI_VENDOR_ID_INTEL,
				pvt->dev_info->err_dev, NULL);

	if (pvt->dev_d0f1 == NULL) {
		/* device hidden by the BIOS: force a rescan of dev 0 func 1 */
		pvt->dev_d0f1 = pci_scan_single_device(pdev->bus,
							PCI_DEVFN(0, 1));
		pci_dev_get(pvt->dev_d0f1);
	}

	if (pvt->dev_d0f1 == NULL) {
		e752x_printk(KERN_ERR, "error reporting device not found:"
			"vendor %x device 0x%x (broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev);
		return 1;
	}

	pvt->dev_d0f0 = pci_get_device(PCI_VENDOR_ID_INTEL,
				e752x_devs[dev_idx].ctl_dev,
				NULL);

	if (pvt->dev_d0f0 == NULL)
		goto fail;

	return 0;

fail:
	/* release the d0f1 reference taken above */
	pci_dev_put(pvt->dev_d0f1);
	return 1;
}
1207*4882a593Smuzhiyun 
/* Setup system bus parity mask register.
 * Sysbus parity supported on:
 * e7320/e7520/e7525 + Xeon
 */
static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt)
{
	char *cpu_id = cpu_data(0).x86_model_id;
	struct pci_dev *dev = pvt->dev_d0f1;
	int enable = 1;

	/* Allow module parameter override, else see if CPU supports parity */
	if (sysbus_parity != -1) {
		enable = sysbus_parity;
	} else if (cpu_id[0] && !strstr(cpu_id, "Xeon")) {
		/* non-Xeon CPUs don't drive sysbus parity; reporting it
		 * would produce false errors */
		e752x_printk(KERN_INFO, "System Bus Parity not "
			     "supported by CPU, disabling\n");
		enable = 0;
	}

	if (enable)
		/* unmask all system-bus error sources */
		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000);
	else
		/* 0x0309 masks the parity-related sources -- TODO confirm
		 * exact bit meanings against the chipset datasheet */
		pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309);
}
1232*4882a593Smuzhiyun 
e752x_init_error_reporting_regs(struct e752x_pvt * pvt)1233*4882a593Smuzhiyun static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt)
1234*4882a593Smuzhiyun {
1235*4882a593Smuzhiyun 	struct pci_dev *dev;
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 	dev = pvt->dev_d0f1;
1238*4882a593Smuzhiyun 	/* Turn off error disable & SMI in case the BIOS turned it on */
1239*4882a593Smuzhiyun 	if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) {
1240*4882a593Smuzhiyun 		pci_write_config_dword(dev, I3100_NSI_EMASK, 0);
1241*4882a593Smuzhiyun 		pci_write_config_dword(dev, I3100_NSI_SMICMD, 0);
1242*4882a593Smuzhiyun 	} else {
1243*4882a593Smuzhiyun 		pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00);
1244*4882a593Smuzhiyun 		pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00);
1245*4882a593Smuzhiyun 	}
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	e752x_init_sysbus_parity_mask(pvt);
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun 	pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00);
1250*4882a593Smuzhiyun 	pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00);
1251*4882a593Smuzhiyun 	pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00);
1252*4882a593Smuzhiyun 	pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00);
1253*4882a593Smuzhiyun 	pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00);
1254*4882a593Smuzhiyun }
1255*4882a593Smuzhiyun 
/* Main probe worker: discover the memory-controller configuration,
 * allocate and populate the EDAC mem_ctl_info, and register it with the
 * EDAC core.  Returns 0 on success, -ENODEV/-ENOMEM on failure.
 *
 * NOTE(review): register-access ordering below is deliberate — DEVPRES1
 * must be unhidden before any dev0:fun1 access, and E752X_DRM must be
 * read before e752x_init_csrows() so reversed csrow mapping is honored.
 */
static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
{
	u16 pci_data;
	u8 stat8;
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[2];
	struct e752x_pvt *pvt;
	u16 ddrcsr;
	int drc_chan;		/* Number of channels 0=1chan,1=2chan */
	struct e752x_error_info discard;

	edac_dbg(0, "mci\n");
	edac_dbg(0, "Starting Probe1\n");

	/* check to see if device 0 function 1 is enabled; if it isn't, we
	 * assume the BIOS has reserved it for a reason and is expecting
	 * exclusive access, we take care not to violate that assumption and
	 * fail the probe. */
	pci_read_config_byte(pdev, E752X_DEVPRES1, &stat8);
	if (!force_function_unhide && !(stat8 & (1 << 5))) {
		printk(KERN_INFO "Contact your BIOS vendor to see if the "
			"E752x error registers can be safely un-hidden\n");
		return -ENODEV;
	}
	/* bit 5 = dev0:fun1 present; set it to unhide the error registers */
	stat8 |= (1 << 5);
	pci_write_config_byte(pdev, E752X_DEVPRES1, stat8);

	pci_read_config_word(pdev, E752X_DDRCSR, &ddrcsr);
	/* FIXME: should check >>12 or 0xf, true for all? */
	/* Dual channel = 1, Single channel = 0 */
	drc_chan = dual_channel_active(ddrcsr);

	/* layer 0: csrows, layer 1: channels (1 or 2 depending on DDRCSR) */
	layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
	layers[0].size = E752X_NR_CSROWS;
	layers[0].is_virt_csrow = true;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = drc_chan + 1;
	layers[1].is_virt_csrow = false;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (mci == NULL)
		return -ENOMEM;

	edac_dbg(3, "init mci\n");
	mci->mtype_cap = MEM_FLAG_RDDR;
	/* 3100 IMCH supports SECDEC only */
	mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED :
		(EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED);
	/* FIXME - what if different memory types are in different csrows? */
	mci->mod_name = EDAC_MOD_STR;
	mci->pdev = &pdev->dev;

	edac_dbg(3, "init pvt\n");
	pvt = (struct e752x_pvt *)mci->pvt_info;
	pvt->dev_info = &e752x_devs[dev_idx];
	/* DDRCSR bit 4: symmetric (interleaved) channel population */
	pvt->mc_symmetric = ((ddrcsr & 0x10) != 0);

	/* take references on the dev0:fun1 error and ctl devices */
	if (e752x_get_devs(pdev, dev_idx, pvt)) {
		edac_mc_free(mci);
		return -ENODEV;
	}

	edac_dbg(3, "more mci init\n");
	mci->ctl_name = pvt->dev_info->ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->edac_check = e752x_check;
	mci->ctl_page_to_phys = ctl_page_to_phys;
	mci->set_sdram_scrub_rate = set_sdram_scrub_rate;
	mci->get_sdram_scrub_rate = get_sdram_scrub_rate;

	/* set the map type.  1 = normal, 0 = reversed
	 * Must be set before e752x_init_csrows in case csrow mapping
	 * is reversed.
	 */
	pci_read_config_byte(pdev, E752X_DRM, &stat8);
	pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f));

	e752x_init_csrows(mci, pdev, ddrcsr);
	e752x_init_mem_map_table(pdev, pvt);

	if (dev_idx == I3100)
		mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */
	else
		mci->edac_cap |= EDAC_FLAG_NONE;
	edac_dbg(3, "tolm, remapbase, remaplimit\n");

	/* load the top of low memory, remap base, and remap limit vars */
	pci_read_config_word(pdev, E752X_TOLM, &pci_data);
	pvt->tolm = ((u32) pci_data) << 4;	/* TOLM is in 64KB units of pages */
	pci_read_config_word(pdev, E752X_REMAPBASE, &pci_data);
	pvt->remapbase = ((u32) pci_data) << 14;
	pci_read_config_word(pdev, E752X_REMAPLIMIT, &pci_data);
	pvt->remaplimit = ((u32) pci_data) << 14;
	e752x_printk(KERN_INFO,
			"tolm = %x, remapbase = %x, remaplimit = %x\n",
			pvt->tolm, pvt->remapbase, pvt->remaplimit);

	/* Here we assume that we will never see multiple instances of this
	 * type of memory controller.  The ID is therefore hardcoded to 0.
	 */
	if (edac_mc_add_mc(mci)) {
		edac_dbg(3, "failed edac_mc_add_mc()\n");
		goto fail;
	}

	/* unmask error reporting now that the MC is registered */
	e752x_init_error_reporting_regs(pvt);
	e752x_get_error_info(mci, &discard);	/* clear other MCH errors */

	/* allocating generic PCI control info */
	e752x_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!e752x_pci) {
		/* non-fatal: MC error reporting still works without it */
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n", __func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	/* get this far and it's successful */
	edac_dbg(3, "success\n");
	return 0;

fail:
	/* drop the references taken by e752x_get_devs() */
	pci_dev_put(pvt->dev_d0f0);
	pci_dev_put(pvt->dev_d0f1);
	edac_mc_free(mci);

	return -ENODEV;
}
1384*4882a593Smuzhiyun 
1385*4882a593Smuzhiyun /* returns count (>= 0), or negative on error */
e752x_init_one(struct pci_dev * pdev,const struct pci_device_id * ent)1386*4882a593Smuzhiyun static int e752x_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1387*4882a593Smuzhiyun {
1388*4882a593Smuzhiyun 	edac_dbg(0, "\n");
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun 	/* wake up and enable device */
1391*4882a593Smuzhiyun 	if (pci_enable_device(pdev) < 0)
1392*4882a593Smuzhiyun 		return -EIO;
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 	return e752x_probe1(pdev, ent->driver_data);
1395*4882a593Smuzhiyun }
1396*4882a593Smuzhiyun 
e752x_remove_one(struct pci_dev * pdev)1397*4882a593Smuzhiyun static void e752x_remove_one(struct pci_dev *pdev)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun 	struct mem_ctl_info *mci;
1400*4882a593Smuzhiyun 	struct e752x_pvt *pvt;
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	edac_dbg(0, "\n");
1403*4882a593Smuzhiyun 
1404*4882a593Smuzhiyun 	if (e752x_pci)
1405*4882a593Smuzhiyun 		edac_pci_release_generic_ctl(e752x_pci);
1406*4882a593Smuzhiyun 
1407*4882a593Smuzhiyun 	if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL)
1408*4882a593Smuzhiyun 		return;
1409*4882a593Smuzhiyun 
1410*4882a593Smuzhiyun 	pvt = (struct e752x_pvt *)mci->pvt_info;
1411*4882a593Smuzhiyun 	pci_dev_put(pvt->dev_d0f0);
1412*4882a593Smuzhiyun 	pci_dev_put(pvt->dev_d0f1);
1413*4882a593Smuzhiyun 	edac_mc_free(mci);
1414*4882a593Smuzhiyun }
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun static const struct pci_device_id e752x_pci_tbl[] = {
1417*4882a593Smuzhiyun 	{
1418*4882a593Smuzhiyun 	 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1419*4882a593Smuzhiyun 	 E7520},
1420*4882a593Smuzhiyun 	{
1421*4882a593Smuzhiyun 	 PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1422*4882a593Smuzhiyun 	 E7525},
1423*4882a593Smuzhiyun 	{
1424*4882a593Smuzhiyun 	 PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1425*4882a593Smuzhiyun 	 E7320},
1426*4882a593Smuzhiyun 	{
1427*4882a593Smuzhiyun 	 PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1428*4882a593Smuzhiyun 	 I3100},
1429*4882a593Smuzhiyun 	{
1430*4882a593Smuzhiyun 	 0,
1431*4882a593Smuzhiyun 	 }			/* 0 terminated list. */
1432*4882a593Smuzhiyun };
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, e752x_pci_tbl);
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun static struct pci_driver e752x_driver = {
1437*4882a593Smuzhiyun 	.name = EDAC_MOD_STR,
1438*4882a593Smuzhiyun 	.probe = e752x_init_one,
1439*4882a593Smuzhiyun 	.remove = e752x_remove_one,
1440*4882a593Smuzhiyun 	.id_table = e752x_pci_tbl,
1441*4882a593Smuzhiyun };
1442*4882a593Smuzhiyun 
e752x_init(void)1443*4882a593Smuzhiyun static int __init e752x_init(void)
1444*4882a593Smuzhiyun {
1445*4882a593Smuzhiyun 	int pci_rc;
1446*4882a593Smuzhiyun 
1447*4882a593Smuzhiyun 	edac_dbg(3, "\n");
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
1450*4882a593Smuzhiyun 	opstate_init();
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 	pci_rc = pci_register_driver(&e752x_driver);
1453*4882a593Smuzhiyun 	return (pci_rc < 0) ? pci_rc : 0;
1454*4882a593Smuzhiyun }
1455*4882a593Smuzhiyun 
e752x_exit(void)1456*4882a593Smuzhiyun static void __exit e752x_exit(void)
1457*4882a593Smuzhiyun {
1458*4882a593Smuzhiyun 	edac_dbg(3, "\n");
1459*4882a593Smuzhiyun 	pci_unregister_driver(&e752x_driver);
1460*4882a593Smuzhiyun }
1461*4882a593Smuzhiyun 
1462*4882a593Smuzhiyun module_init(e752x_init);
1463*4882a593Smuzhiyun module_exit(e752x_exit);
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1466*4882a593Smuzhiyun MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n");
1467*4882a593Smuzhiyun MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers");
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun module_param(force_function_unhide, int, 0444);
1470*4882a593Smuzhiyun MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:"
1471*4882a593Smuzhiyun 		 " 1=force unhide and hope BIOS doesn't fight driver for "
1472*4882a593Smuzhiyun 		"Dev0:Fun1 access");
1473*4882a593Smuzhiyun 
1474*4882a593Smuzhiyun module_param(edac_op_state, int, 0444);
1475*4882a593Smuzhiyun MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1476*4882a593Smuzhiyun 
1477*4882a593Smuzhiyun module_param(sysbus_parity, int, 0444);
1478*4882a593Smuzhiyun MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking,"
1479*4882a593Smuzhiyun 		" 1=enable system bus parity checking, default=auto-detect");
1480*4882a593Smuzhiyun module_param(report_non_memory_errors, int, 0644);
1481*4882a593Smuzhiyun MODULE_PARM_DESC(report_non_memory_errors, "0=disable non-memory error "
1482*4882a593Smuzhiyun 		"reporting, 1=enable non-memory error reporting");
1483