// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel 7300 class Memory Controllers kernel module (Clarksboro)
 *
 * Copyright (c) 2010 by:
 *	 Mauro Carvalho Chehab
 *
 * Red Hat Inc. https://www.redhat.com
 *
 * Intel 7300 Chipset Memory Controller Hub (MCH) - Datasheet
 *	http://www.intel.com/Assets/PDF/datasheet/318082.pdf
 *
 * TODO: The chipset also allows checking for PCI Express errors. Currently,
 *	 the driver covers only memory errors.
 *
 * This driver uses the "csrows" EDAC attribute to represent the DIMM slot#
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/edac.h>
#include <linux/mmzone.h>

#include "edac_module.h"

/*
 * Alter this version for the I7300 module when modifications are made
 */
#define I7300_REVISION    " Ver: 1.0.0"

#define EDAC_MOD_STR      "i7300_edac"

#define i7300_printk(level, fmt, arg...) \
	edac_printk(level, "i7300", fmt, ##arg)

#define i7300_mc_printk(mci, level, fmt, arg...) \
	edac_mc_chipset_printk(mci, level, "i7300", fmt, ##arg)

/***********************************************
 * i7300 Limit constants Structs and static vars
 ***********************************************/

/*
 * Memory topology is organized as:
 *	Branch 0 - 2 channels: channels 0 and 1 (FDB0 PCI dev 21.0)
 *	Branch 1 - 2 channels: channels 2 and 3 (FDB1 PCI dev 22.0)
 * Each channel can have up to 8 DIMM sets (called SLOTS)
 * Slots should generally be filled in pairs
 *	Except in Single Channel mode of operation,
 *		where only slot 0/channel 0 is filled
 *	In normal operation mode, the two channels on a branch should be
 *		filled together for the same SLOT#
 * When in mirrored mode, Branch 1 replicates the memory at Branch 0, so the
 *		four channels on both branches should be filled
 */

/* Limits for i7300 */
#define MAX_SLOTS		8
#define MAX_BRANCHES		2
#define MAX_CH_PER_BRANCH	2
#define MAX_CHANNELS		(MAX_CH_PER_BRANCH * MAX_BRANCHES)
#define MAX_MIR			3

#define to_channel(ch, branch)	((((branch)) << 1) | (ch))

#define to_csrow(slot, ch, branch)					\
		(to_channel(ch, branch) | ((slot) << 2))
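
/*
 * Illustrative example (not from the datasheet): for slot = 3, ch = 1,
 * branch = 1, to_channel() gives (1 << 1) | 1 = 3 and to_csrow() gives
 * 3 | (3 << 2) = 15. The channel occupies the two low bits of the csrow
 * index and the slot number the bits above them.
 */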

/* Device name and register DID (Device ID) */
struct i7300_dev_info {
	const char *ctl_name;	/* name for this device */
	u16 fsb_mapping_errors;	/* DID for the branchmap,control */
};

/* Table of device attributes supported by this driver */
static const struct i7300_dev_info i7300_devs[] = {
	{
		.ctl_name = "I7300",
		.fsb_mapping_errors = PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
	},
};

struct i7300_dimm_info {
	int megabytes;		/* size, 0 means not present  */
};

/* driver private data structure */
struct i7300_pvt {
	struct pci_dev *pci_dev_16_0_fsb_ctlr;		/* 16.0 */
	struct pci_dev *pci_dev_16_1_fsb_addr_map;	/* 16.1 */
	struct pci_dev *pci_dev_16_2_fsb_err_regs;	/* 16.2 */
	struct pci_dev *pci_dev_2x_0_fbd_branch[MAX_BRANCHES];	/* 21.0  and 22.0 */

	u16 tolm;				/* top of low memory */
	u64 ambase;				/* AMB BAR */

	u32 mc_settings;			/* Report several settings */
	u32 mc_settings_a;

	u16 mir[MAX_MIR];			/* Memory Interleave Reg */

	u16 mtr[MAX_SLOTS][MAX_BRANCHES];	/* Memory Technology Reg */
	u16 ambpresent[MAX_CHANNELS];		/* AMB present regs */

	/* DIMM information matrix, allocating architecture maximums */
	struct i7300_dimm_info dimm_info[MAX_SLOTS][MAX_CHANNELS];

	/* Temporary buffer for use when preparing error messages */
	char *tmp_prt_buffer;
};

/* FIXME: Why do we need to have this static? */
static struct edac_pci_ctl_info *i7300_pci;

/***************************************************
 * i7300 Register definitions for memory enumeration
 ***************************************************/

/*
 * Device 16,
 * Function 0: System Address (not documented)
 * Function 1: Memory Branch Map, Control, Errors Register
 */

	/* OFFSETS for Function 0 */
#define AMBASE			0x48 /* AMB Mem Mapped Reg Region Base */
#define MAXCH			0x56 /* Max Channel Number */
#define MAXDIMMPERCH		0x57 /* Max DIMM PER Channel Number */

	/* OFFSETS for Function 1 */
#define MC_SETTINGS		0x40
  #define IS_MIRRORED(mc)		((mc) & (1 << 16))
  #define IS_ECC_ENABLED(mc)		((mc) & (1 << 5))
  #define IS_RETRY_ENABLED(mc)		((mc) & (1 << 31))
  #define IS_SCRBALGO_ENHANCED(mc)	((mc) & (1 << 8))

#define MC_SETTINGS_A		0x58
  #define IS_SINGLE_MODE(mca)		((mca) & (1 << 14))

#define TOLM			0x6C

#define MIR0			0x80
#define MIR1			0x84
#define MIR2			0x88

/*
 * Note: Other Intel EDAC drivers use AMBPRESENT to identify the available
 * memory. From datasheet item 7.3.1 (FB-DIMM technology & organization), it
 * seems that we cannot use this information directly for the same purpose.
 * Each memory slot may have up to 2 AMB interfaces, one for the inbound and
 * another for the outbound interface to the next slot.
 * For now, the driver just stores the AMB present registers, but relies only
 * on the MTR info to detect memory.
 * The datasheet is also not clear about how to map each AMBPRESENT register
 * to one of the 4 available channels.
 */
#define AMBPRESENT_0	0x64
#define AMBPRESENT_1	0x66

static const u16 mtr_regs[MAX_SLOTS] = {
	0x80, 0x84, 0x88, 0x8c,
	0x82, 0x86, 0x8a, 0x8e
};

/*
 * Defines to extract the various fields from the
 *	MTRx - Memory Technology Registers
 */
#define MTR_DIMMS_PRESENT(mtr)		((mtr) & (1 << 8))
#define MTR_DIMMS_ETHROTTLE(mtr)	((mtr) & (1 << 7))
#define MTR_DRAM_WIDTH(mtr)		(((mtr) & (1 << 6)) ? 8 : 4)
#define MTR_DRAM_BANKS(mtr)		(((mtr) & (1 << 5)) ? 8 : 4)
#define MTR_DIMM_RANKS(mtr)		(((mtr) & (1 << 4)) ? 1 : 0)
#define MTR_DIMM_ROWS(mtr)		(((mtr) >> 2) & 0x3)
#define MTR_DRAM_BANKS_ADDR_BITS	2
#define MTR_DIMM_ROWS_ADDR_BITS(mtr)	(MTR_DIMM_ROWS(mtr) + 13)
#define MTR_DIMM_COLS(mtr)		((mtr) & 0x3)
#define MTR_DIMM_COLS_ADDR_BITS(mtr)	(MTR_DIMM_COLS(mtr) + 10)
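
/*
 * Worked example (illustrative only, field layout as per the macros above):
 * an MTR value of 0x0135 has bit 8 set (DIMM present), bit 5 set (8 banks),
 * bit 4 set (double rank), a rows field of 1 (14 row address bits) and a
 * cols field of 1 (11 column address bits).
 */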

/************************************************
 * i7300 Register definitions for error detection
 ************************************************/

/*
 * Device 16.1: FBD Error Registers
 */
#define FERR_FAT_FBD	0x98
static const char *ferr_fat_fbd_name[] = {
	[22] = "Non-Redundant Fast Reset Timeout",
	[2]  = ">Tmid Thermal event with intelligent throttling disabled",
	[1]  = "Memory or FBD configuration CRC read error",
	[0]  = "Memory Write error on non-redundant retry or "
	       "FBD configuration Write error on retry",
};
#define GET_FBD_FAT_IDX(fbderr)	(((fbderr) >> 28) & 3)
#define FERR_FAT_FBD_ERR_MASK ((1 << 0) | (1 << 1) | (1 << 2) | (1 << 22))

#define FERR_NF_FBD	0xa0
static const char *ferr_nf_fbd_name[] = {
	[24] = "DIMM-Spare Copy Completed",
	[23] = "DIMM-Spare Copy Initiated",
	[22] = "Redundant Fast Reset Timeout",
	[21] = "Memory Write error on redundant retry",
	[18] = "SPD protocol Error",
	[17] = "FBD Northbound parity error on FBD Sync Status",
	[16] = "Correctable Patrol Data ECC",
	[15] = "Correctable Resilver- or Spare-Copy Data ECC",
	[14] = "Correctable Mirrored Demand Data ECC",
	[13] = "Correctable Non-Mirrored Demand Data ECC",
	[11] = "Memory or FBD configuration CRC read error",
	[10] = "FBD Configuration Write error on first attempt",
	[9]  = "Memory Write error on first attempt",
	[8]  = "Non-Aliased Uncorrectable Patrol Data ECC",
	[7]  = "Non-Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[6]  = "Non-Aliased Uncorrectable Mirrored Demand Data ECC",
	[5]  = "Non-Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[4]  = "Aliased Uncorrectable Patrol Data ECC",
	[3]  = "Aliased Uncorrectable Resilver- or Spare-Copy Data ECC",
	[2]  = "Aliased Uncorrectable Mirrored Demand Data ECC",
	[1]  = "Aliased Uncorrectable Non-Mirrored Demand Data ECC",
	[0]  = "Uncorrectable Data ECC on Replay",
};
#define GET_FBD_NF_IDX(fbderr)	(((fbderr) >> 28) & 3)
#define FERR_NF_FBD_ERR_MASK ((1 << 24) | (1 << 23) | (1 << 22) | (1 << 21) |\
			      (1 << 18) | (1 << 17) | (1 << 16) | (1 << 15) |\
			      (1 << 14) | (1 << 13) | (1 << 11) | (1 << 10) |\
			      (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
			      (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
			      (1 << 1)  | (1 << 0))

#define EMASK_FBD	0xa8
#define EMASK_FBD_ERR_MASK ((1 << 27) | (1 << 26) | (1 << 25) | (1 << 24) |\
			    (1 << 22) | (1 << 21) | (1 << 20) | (1 << 19) |\
			    (1 << 18) | (1 << 17) | (1 << 16) | (1 << 14) |\
			    (1 << 13) | (1 << 12) | (1 << 11) | (1 << 10) |\
			    (1 << 9)  | (1 << 8)  | (1 << 7)  | (1 << 6)  |\
			    (1 << 5)  | (1 << 4)  | (1 << 3)  | (1 << 2)  |\
			    (1 << 1)  | (1 << 0))

/*
 * Device 16.2: Global Error Registers
 */

#define FERR_GLOBAL_HI	0x48
static const char *ferr_global_hi_name[] = {
	[3] = "FSB 3 Fatal Error",
	[2] = "FSB 2 Fatal Error",
	[1] = "FSB 1 Fatal Error",
	[0] = "FSB 0 Fatal Error",
};
#define ferr_global_hi_is_fatal(errno)	1

#define FERR_GLOBAL_LO	0x40
static const char *ferr_global_lo_name[] = {
	[31] = "Internal MCH Fatal Error",
	[30] = "Intel QuickData Technology Device Fatal Error",
	[29] = "FSB1 Fatal Error",
	[28] = "FSB0 Fatal Error",
	[27] = "FBD Channel 3 Fatal Error",
	[26] = "FBD Channel 2 Fatal Error",
	[25] = "FBD Channel 1 Fatal Error",
	[24] = "FBD Channel 0 Fatal Error",
	[23] = "PCI Express Device 7 Fatal Error",
	[22] = "PCI Express Device 6 Fatal Error",
	[21] = "PCI Express Device 5 Fatal Error",
	[20] = "PCI Express Device 4 Fatal Error",
	[19] = "PCI Express Device 3 Fatal Error",
	[18] = "PCI Express Device 2 Fatal Error",
	[17] = "PCI Express Device 1 Fatal Error",
	[16] = "ESI Fatal Error",
	[15] = "Internal MCH Non-Fatal Error",
	[14] = "Intel QuickData Technology Device Non Fatal Error",
	[13] = "FSB1 Non-Fatal Error",
	[12] = "FSB 0 Non-Fatal Error",
	[11] = "FBD Channel 3 Non-Fatal Error",
	[10] = "FBD Channel 2 Non-Fatal Error",
	[9]  = "FBD Channel 1 Non-Fatal Error",
	[8]  = "FBD Channel 0 Non-Fatal Error",
	[7]  = "PCI Express Device 7 Non-Fatal Error",
	[6]  = "PCI Express Device 6 Non-Fatal Error",
	[5]  = "PCI Express Device 5 Non-Fatal Error",
	[4]  = "PCI Express Device 4 Non-Fatal Error",
	[3]  = "PCI Express Device 3 Non-Fatal Error",
	[2]  = "PCI Express Device 2 Non-Fatal Error",
	[1]  = "PCI Express Device 1 Non-Fatal Error",
	[0]  = "ESI Non-Fatal Error",
};
#define ferr_global_lo_is_fatal(errno)	((errno < 16) ? 0 : 1)

#define NRECMEMA	0xbe
  #define NRECMEMA_BANK(v)	(((v) >> 12) & 7)
  #define NRECMEMA_RANK(v)	(((v) >> 8) & 15)

#define NRECMEMB	0xc0
  #define NRECMEMB_IS_WR(v)	((v) & (1 << 31))
  #define NRECMEMB_CAS(v)	(((v) >> 16) & 0x1fff)
  #define NRECMEMB_RAS(v)	((v) & 0xffff)

#define REDMEMA		0xdc

#define REDMEMB		0x7c

#define RECMEMA		0xe0
  #define RECMEMA_BANK(v)	(((v) >> 12) & 7)
  #define RECMEMA_RANK(v)	(((v) >> 8) & 15)

#define RECMEMB		0xe4
  #define RECMEMB_IS_WR(v)	((v) & (1 << 31))
  #define RECMEMB_CAS(v)	(((v) >> 16) & 0x1fff)
  #define RECMEMB_RAS(v)	((v) & 0xffff)

/********************************************
 * i7300 Functions related to error detection
 ********************************************/

/**
 * get_err_from_table() - Gets the error message from a table
 * @table:	table name (array of char *)
 * @size:	number of elements in the table
 * @pos:	position of the element to be returned
 *
 * This is a small routine that gets the pos-th element of a table. If the
 * element doesn't exist (or is empty), it returns "Reserved".
 * Instead of calling it directly, it is better to use the macro
 * GET_ERR_FROM_TABLE(), which automatically checks the table size via
 * the ARRAY_SIZE() macro.
 */
static const char *get_err_from_table(const char *table[], int size, int pos)
{
	if (unlikely(pos >= size))
		return "Reserved";

	if (unlikely(!table[pos]))
		return "Reserved";

	return table[pos];
}

#define GET_ERR_FROM_TABLE(table, pos)				\
	get_err_from_table(table, ARRAY_SIZE(table), pos)

/**
 * i7300_process_error_global() - Retrieve the hardware error information from
 *				  the hardware global error registers and
 *				  send it to dmesg
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_process_error_global(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 errnum, error_reg;
	unsigned long errors;
	const char *specific;
	bool is_fatal;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_HI, &error_reg);
	if (unlikely(error_reg)) {
		errors = error_reg;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_global_hi_name));
		specific = GET_ERR_FROM_TABLE(ferr_global_hi_name, errnum);
		is_fatal = ferr_global_hi_is_fatal(errnum);

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
				       FERR_GLOBAL_HI, error_reg);

		goto error_global;
	}

	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_LO, &error_reg);
	if (unlikely(error_reg)) {
		errors = error_reg;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_global_lo_name));
		specific = GET_ERR_FROM_TABLE(ferr_global_lo_name, errnum);
		is_fatal = ferr_global_lo_is_fatal(errnum);

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
				       FERR_GLOBAL_LO, error_reg);

		goto error_global;
	}
	return;

error_global:
	i7300_mc_printk(mci, KERN_EMERG, "%s misc error: %s\n",
			is_fatal ? "Fatal" : "NOT fatal", specific);
}

/**
 * i7300_process_fbd_error() - Retrieve the hardware error information from
 *			       the FBD error registers and send it via
 *			       EDAC error API calls
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_process_fbd_error(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 errnum, value, error_reg;
	u16 val16;
	unsigned branch, channel, bank, rank, cas, ras;
	u32 syndrome;

	unsigned long errors;
	const char *specific;
	bool is_wr;

	pvt = mci->pvt_info;

	/* read in the 1st FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_FAT_FBD, &error_reg);
	if (unlikely(error_reg & FERR_FAT_FBD_ERR_MASK)) {
		errors = error_reg & FERR_FAT_FBD_ERR_MASK;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_fat_fbd_name));
		specific = GET_ERR_FROM_TABLE(ferr_fat_fbd_name, errnum);
		branch = (GET_FBD_FAT_IDX(error_reg) == 2) ? 1 : 0;

		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
				     NRECMEMA, &val16);
		bank = NRECMEMA_BANK(val16);
		rank = NRECMEMA_RANK(val16);

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				NRECMEMB, &value);
		is_wr = NRECMEMB_IS_WR(value);
		cas = NRECMEMB_CAS(value);
		ras = NRECMEMB_RAS(value);

		/* Clean the error register */
		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				FERR_FAT_FBD, error_reg);

		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
			 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
			 bank, ras, cas, errors, specific);

		edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 1, 0, 0, 0,
				     branch, -1, rank,
				     is_wr ? "Write error" : "Read error",
				     pvt->tmp_prt_buffer);

	}

	/* read in the 1st NON-FATAL error register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_NF_FBD, &error_reg);
	if (unlikely(error_reg & FERR_NF_FBD_ERR_MASK)) {
		errors = error_reg & FERR_NF_FBD_ERR_MASK;
		errnum = find_first_bit(&errors,
					ARRAY_SIZE(ferr_nf_fbd_name));
		specific = GET_ERR_FROM_TABLE(ferr_nf_fbd_name, errnum);
		branch = (GET_FBD_NF_IDX(error_reg) == 2) ? 1 : 0;

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			REDMEMA, &syndrome);

		pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map,
				     RECMEMA, &val16);
		bank = RECMEMA_BANK(val16);
		rank = RECMEMA_RANK(val16);

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				RECMEMB, &value);
		is_wr = RECMEMB_IS_WR(value);
		cas = RECMEMB_CAS(value);
		ras = RECMEMB_RAS(value);

		pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				     REDMEMB, &value);
		channel = (branch << 1);

		/* Second channel ? */
		channel += !!(value & BIT(17));

		/* Clear the error bit */
		pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
				FERR_NF_FBD, error_reg);

		/* Form out message */
		snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
			 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
			 bank, ras, cas, errors, specific);

		edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0,
				     syndrome,
				     branch >> 1, channel % 2, rank,
				     is_wr ? "Write error" : "Read error",
				     pvt->tmp_prt_buffer);
	}
	return;
}

/**
 * i7300_check_error() - Calls the error checking subroutines
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_check_error(struct mem_ctl_info *mci)
{
	i7300_process_error_global(mci);
	i7300_process_fbd_error(mci);
};

/**
 * i7300_clear_error() - Clears the error registers
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_clear_error(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt = mci->pvt_info;
	u32 value;
	/*
	 * All error values are RWC - we need to read and write 1 to the
	 * bit that we want to clean up
	 */

	/* Clear global error registers */
	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_HI, &value);
	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_HI, value);

	pci_read_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_LO, &value);
	pci_write_config_dword(pvt->pci_dev_16_2_fsb_err_regs,
			      FERR_GLOBAL_LO, value);

	/* Clear FBD error registers */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_FAT_FBD, &value);
	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_FAT_FBD, value);

	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_NF_FBD, &value);
	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      FERR_NF_FBD, value);
}

/**
 * i7300_enable_error_reporting() - Enable the memory reporting logic in the
 *				    hardware
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt = mci->pvt_info;
	u32 fbd_error_mask;

	/* Read the FBD Error Mask Register */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			      EMASK_FBD, &fbd_error_mask);

	/* Enable with a '0' */
	fbd_error_mask &= ~(EMASK_FBD_ERR_MASK);

	pci_write_config_dword(pvt->pci_dev_16_1_fsb_addr_map,
			       EMASK_FBD, fbd_error_mask);
}

/************************************************
 * i7300 Functions related to memory enumeration
 ************************************************/

/**
 * decode_mtr() - Decodes the MTR descriptor, filling the edac structs
 * @pvt: pointer to the private data struct used by i7300 driver
 * @slot: DIMM slot (0 to 7)
 * @ch: Channel number within the branch (0 or 1)
 * @branch: Branch number (0 or 1)
 * @dinfo: Pointer to DIMM info where dimm size is stored
 * @dimm: Pointer to the struct dimm_info that corresponds to that element
 */
static int decode_mtr(struct i7300_pvt *pvt,
		      int slot, int ch, int branch,
		      struct i7300_dimm_info *dinfo,
		      struct dimm_info *dimm)
{
	int mtr, ans, addrBits, channel;

	channel = to_channel(ch, branch);

	mtr = pvt->mtr[slot][branch];
	ans = MTR_DIMMS_PRESENT(mtr) ? 1 : 0;

	edac_dbg(2, "\tMTR%d CH%d: DIMMs are %sPresent (mtr)\n",
		 slot, channel, ans ? "" : "NOT ");

	/* Determine if there is a DIMM present in this DIMM slot */
	if (!ans)
		return 0;

	/* Start with the number of bits for a Bank
	 * on the DRAM */
	addrBits = MTR_DRAM_BANKS_ADDR_BITS;
	/* Add the number of ROW bits */
	addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
	/* add the number of COLUMN bits */
	addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
	/* add the number of RANK bits */
	addrBits += MTR_DIMM_RANKS(mtr);

	addrBits += 6;	/* add 64 bits per DIMM */
	addrBits -= 20;	/* divide by 2^20 */
	addrBits -= 3;	/* 8 bits per byte */

	dinfo->megabytes = 1 << addrBits;
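
	/*
	 * Worked example (illustrative numbers, not a measured config):
	 * 2 bank bits + 14 row bits + 11 column bits + 1 rank bit = 28,
	 * plus 6 (64-bit wide access), minus 20 (bytes -> MiB), minus 3
	 * (bits -> bytes) gives addrBits = 11, i.e. a 2048 MB DIMM.
	 */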

	edac_dbg(2, "\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));

	edac_dbg(2, "\t\tELECTRICAL THROTTLING is %s\n",
		 MTR_DIMMS_ETHROTTLE(mtr) ? "enabled" : "disabled");

	edac_dbg(2, "\t\tNUMBANK: %d bank(s)\n", MTR_DRAM_BANKS(mtr));
	edac_dbg(2, "\t\tNUMRANK: %s\n",
		 MTR_DIMM_RANKS(mtr) ? "double" : "single");
	edac_dbg(2, "\t\tNUMROW: %s\n",
		 MTR_DIMM_ROWS(mtr) == 0 ? "8,192 - 13 rows" :
		 MTR_DIMM_ROWS(mtr) == 1 ? "16,384 - 14 rows" :
		 MTR_DIMM_ROWS(mtr) == 2 ? "32,768 - 15 rows" :
		 "65,536 - 16 rows");
	edac_dbg(2, "\t\tNUMCOL: %s\n",
		 MTR_DIMM_COLS(mtr) == 0 ? "1,024 - 10 columns" :
		 MTR_DIMM_COLS(mtr) == 1 ? "2,048 - 11 columns" :
		 MTR_DIMM_COLS(mtr) == 2 ? "4,096 - 12 columns" :
		 "reserved");
	edac_dbg(2, "\t\tSIZE: %d MB\n", dinfo->megabytes);

	/*
	 * The type of error detection actually depends on the
	 * mode of operation. When it is just one single memory chip, at
	 * socket 0, channel 0, it uses 8-byte-over-32-byte SECDED+ code.
	 * In normal or mirrored mode, it uses Lockstep mode,
	 * with the possibility of using an extended algorithm for x8 memories
	 * See datasheet Sections 7.3.6 to 7.3.8
	 */

	dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
	dimm->grain = 8;
	dimm->mtype = MEM_FB_DDR2;
	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
		dimm->edac_mode = EDAC_SECDED;
		edac_dbg(2, "\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
	} else {
		edac_dbg(2, "\t\tECC code is on Lockstep mode\n");
		if (MTR_DRAM_WIDTH(mtr) == 8)
			dimm->edac_mode = EDAC_S8ECD8ED;
		else
			dimm->edac_mode = EDAC_S4ECD4ED;
	}

	/* ask what device type on this row */
	if (MTR_DRAM_WIDTH(mtr) == 8) {
		edac_dbg(2, "\t\tScrub algorithm for x8 is on %s mode\n",
			 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
			 "enhanced" : "normal");

		dimm->dtype = DEV_X8;
	} else
		dimm->dtype = DEV_X4;

	return mtr;
}

/**
 * print_dimm_size() - Prints a dump of the memory organization
 * @pvt: pointer to the private data struct used by i7300 driver
 *
 * Useful for debug. If debug is disabled, this routine does nothing
 */
static void print_dimm_size(struct i7300_pvt *pvt)
{
#ifdef CONFIG_EDAC_DEBUG
	struct i7300_dimm_info *dinfo;
	char *p;
	int space, n;
	int channel, slot;

	space = PAGE_SIZE;
	p = pvt->tmp_prt_buffer;

	n = snprintf(p, space, "              ");
	p += n;
	space -= n;
	for (channel = 0; channel < MAX_CHANNELS; channel++) {
		n = snprintf(p, space, "channel %d | ", channel);
		p += n;
		space -= n;
	}
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;
	n = snprintf(p, space, "-------------------------------"
			       "------------------------------");
	p += n;
	space -= n;
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;

	for (slot = 0; slot < MAX_SLOTS; slot++) {
		n = snprintf(p, space, "csrow/SLOT %d  ", slot);
		p += n;
		space -= n;

		for (channel = 0; channel < MAX_CHANNELS; channel++) {
			dinfo = &pvt->dimm_info[slot][channel];
			n = snprintf(p, space, "%4d MB   | ", dinfo->megabytes);
			p += n;
			space -= n;
		}

		edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
		p = pvt->tmp_prt_buffer;
		space = PAGE_SIZE;
	}

	n = snprintf(p, space, "-------------------------------"
			       "------------------------------");
	p += n;
	space -= n;
	edac_dbg(2, "%s\n", pvt->tmp_prt_buffer);
	p = pvt->tmp_prt_buffer;
	space = PAGE_SIZE;
#endif
}

/**
 * i7300_init_csrows() - Initialize the 'csrows' table within
 *			 the mci control structure with the
 *			 addressing of memory.
 * @mci: struct mem_ctl_info pointer
 */
static int i7300_init_csrows(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	struct i7300_dimm_info *dinfo;
	int rc = -ENODEV;
	int mtr;
	int ch, branch, slot, channel, max_channel, max_branch;
	struct dimm_info *dimm;

	pvt = mci->pvt_info;

	edac_dbg(2, "Memory Technology Registers:\n");

	if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
		max_branch = 1;
		max_channel = 1;
	} else {
		max_branch = MAX_BRANCHES;
		max_channel = MAX_CH_PER_BRANCH;
	}

	/* Get the AMB present registers for the four channels */
	for (branch = 0; branch < max_branch; branch++) {
		/* Read and dump branch 0's MTRs */
		channel = to_channel(0, branch);
		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
				     AMBPRESENT_0,
				&pvt->ambpresent[channel]);
		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
			 channel, pvt->ambpresent[channel]);

		if (max_channel == 1)
			continue;

		channel = to_channel(1, branch);
		pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
				     AMBPRESENT_1,
				&pvt->ambpresent[channel]);
		edac_dbg(2, "\t\tAMB-present CH%d = 0x%x:\n",
			 channel, pvt->ambpresent[channel]);
	}

	/* Get the set of MTR[0-7] regs by each branch */
	for (slot = 0; slot < MAX_SLOTS; slot++) {
		int where = mtr_regs[slot];
		for (branch = 0; branch < max_branch; branch++) {
			pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
					where,
					&pvt->mtr[slot][branch]);
			for (ch = 0; ch < max_channel; ch++) {
				int channel = to_channel(ch, branch);

				dimm = edac_get_dimm(mci, branch, ch, slot);

				dinfo = &pvt->dimm_info[slot][channel];

				mtr = decode_mtr(pvt, slot, ch, branch,
						 dinfo, dimm);

				/* if no DIMMS on this row, continue */
				if (!MTR_DIMMS_PRESENT(mtr))
					continue;

				rc = 0;

			}
		}
	}

	return rc;
}

/**
 * decode_mir() - Decodes Memory Interleave Register (MIR) info
 * @mir_no: number of the MIR register to decode
 * @mir: array with the MIR data cached on the driver
 */
static void decode_mir(int mir_no, u16 mir[MAX_MIR])
{
	if (mir[mir_no] & 3)
		edac_dbg(2, "MIR%d: limit= 0x%x Branch(es) that participate: %s %s\n",
			 mir_no,
			 (mir[mir_no] >> 4) & 0xfff,
			 (mir[mir_no] & 1) ? "B0" : "",
			 (mir[mir_no] & 2) ? "B1" : "");
}

/**
 * i7300_get_mc_regs() - Get the contents of the MC enumeration registers
 * @mci: struct mem_ctl_info pointer
 *
 * The data read is cached internally for later use
 */
static int i7300_get_mc_regs(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	u32 actual_tolm;
	int i, rc;

	pvt = mci->pvt_info;

	pci_read_config_dword(pvt->pci_dev_16_0_fsb_ctlr, AMBASE,
			(u32 *) &pvt->ambase);

	edac_dbg(2, "AMBASE= 0x%lx\n", (long unsigned int)pvt->ambase);

	/* Get the Branch Map regs */
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, TOLM, &pvt->tolm);
	pvt->tolm >>= 12;
	edac_dbg(2, "TOLM (number of 256M regions) =%u (0x%x)\n",
		 pvt->tolm, pvt->tolm);

	actual_tolm = (u32) ((1000l * pvt->tolm) >> (30 - 28));
	edac_dbg(2, "Actual TOLM byte addr=%u.%03u GB (0x%x)\n",
		 actual_tolm/1000, actual_tolm % 1000, pvt->tolm << 28);
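
	/*
	 * Illustrative example (not taken from real hardware): with
	 * pvt->tolm = 16 (16 regions of 256 MB, i.e. 4 GB of low memory),
	 * actual_tolm = (1000 * 16) >> 2 = 4000, printed as "4.000 GB".
	 */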

	/* Get memory controller settings */
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS,
			     &pvt->mc_settings);
	pci_read_config_dword(pvt->pci_dev_16_1_fsb_addr_map, MC_SETTINGS_A,
			     &pvt->mc_settings_a);

	if (IS_SINGLE_MODE(pvt->mc_settings_a))
		edac_dbg(0, "Memory controller operating on single mode\n");
	else
		edac_dbg(0, "Memory controller operating on %smirrored mode\n",
			 IS_MIRRORED(pvt->mc_settings) ? "" : "non-");

	edac_dbg(0, "Error detection is %s\n",
		 IS_ECC_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");
	edac_dbg(0, "Retry is %s\n",
		 IS_RETRY_ENABLED(pvt->mc_settings) ? "enabled" : "disabled");

	/* Get Memory Interleave Range registers */
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR0,
			     &pvt->mir[0]);
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR1,
			     &pvt->mir[1]);
	pci_read_config_word(pvt->pci_dev_16_1_fsb_addr_map, MIR2,
			     &pvt->mir[2]);

	/* Decode the MIR regs */
	for (i = 0; i < MAX_MIR; i++)
		decode_mir(i, pvt->mir);

	rc = i7300_init_csrows(mci);
	if (rc < 0)
		return rc;

	/* Go and determine the size of each DIMM and place in an
	 * orderly matrix */
	print_dimm_size(pvt);

	return 0;
}

/*************************************************
 * i7300 Functions related to device probe/release
 *************************************************/

/**
 * i7300_put_devices() - Release the PCI devices
 * @mci: struct mem_ctl_info pointer
 */
static void i7300_put_devices(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	int branch;

	pvt = mci->pvt_info;

	/* Decrement usage count for devices */
	for (branch = 0; branch < MAX_CH_PER_BRANCH; branch++)
		pci_dev_put(pvt->pci_dev_2x_0_fbd_branch[branch]);
	pci_dev_put(pvt->pci_dev_16_2_fsb_err_regs);
	pci_dev_put(pvt->pci_dev_16_1_fsb_addr_map);
}

/**
 * i7300_get_devices() - Find and perform 'get' operation on the MCH's
 *			 device/functions we want to reference for this driver
 * @mci: struct mem_ctl_info pointer
 *
 * Access and prepare the several devices for usage:
 * I7300 devices used by this driver:
 *    Device 16, functions 0,1 and 2:	PCI_DEVICE_ID_INTEL_I7300_MCH_ERR
 *    Device 21 function 0:		PCI_DEVICE_ID_INTEL_I7300_MCH_FB0
 *    Device 22 function 0:		PCI_DEVICE_ID_INTEL_I7300_MCH_FB1
 */
static int i7300_get_devices(struct mem_ctl_info *mci)
{
	struct i7300_pvt *pvt;
	struct pci_dev *pdev;

	pvt = mci->pvt_info;

	/* Attempt to 'get' the MCH register we want */
	pdev = NULL;
	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
				      PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
				      pdev))) {
		/* Store device 16 funcs 1 and 2 */
		switch (PCI_FUNC(pdev->devfn)) {
		case 1:
			if (!pvt->pci_dev_16_1_fsb_addr_map)
				pvt->pci_dev_16_1_fsb_addr_map =
							pci_dev_get(pdev);
			break;
		case 2:
			if (!pvt->pci_dev_16_2_fsb_err_regs)
				pvt->pci_dev_16_2_fsb_err_regs =
							pci_dev_get(pdev);
			break;
		}
	}

	if (!pvt->pci_dev_16_1_fsb_addr_map ||
	    !pvt->pci_dev_16_2_fsb_err_regs) {
		/* At least one device was not found */
		i7300_printk(KERN_ERR,
			"'system address,Process Bus' device not found:"
			"vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
		goto error;
	}

	edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s  %x:%x\n",
		 pci_name(pvt->pci_dev_16_0_fsb_ctlr),
		 pvt->pci_dev_16_0_fsb_ctlr->vendor,
		 pvt->pci_dev_16_0_fsb_ctlr->device);
	edac_dbg(1, "Branchmap, control and errors - PCI Bus ID: %s  %x:%x\n",
		 pci_name(pvt->pci_dev_16_1_fsb_addr_map),
		 pvt->pci_dev_16_1_fsb_addr_map->vendor,
		 pvt->pci_dev_16_1_fsb_addr_map->device);
	edac_dbg(1, "FSB Error Regs - PCI Bus ID: %s  %x:%x\n",
		 pci_name(pvt->pci_dev_16_2_fsb_err_regs),
		 pvt->pci_dev_16_2_fsb_err_regs->vendor,
		 pvt->pci_dev_16_2_fsb_err_regs->device);

	pvt->pci_dev_2x_0_fbd_branch[0] = pci_get_device(PCI_VENDOR_ID_INTEL,
					    PCI_DEVICE_ID_INTEL_I7300_MCH_FB0,
					    NULL);
	if (!pvt->pci_dev_2x_0_fbd_branch[0]) {
		i7300_printk(KERN_ERR,
			"MC: 'BRANCH 0' device not found:"
			"vendor 0x%x device 0x%x Func 0 (broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_FB0);
		goto error;
	}

	pvt->pci_dev_2x_0_fbd_branch[1] = pci_get_device(PCI_VENDOR_ID_INTEL,
					    PCI_DEVICE_ID_INTEL_I7300_MCH_FB1,
					    NULL);
	if (!pvt->pci_dev_2x_0_fbd_branch[1]) {
		i7300_printk(KERN_ERR,
			"MC: 'BRANCH 1' device not found:"
			"vendor 0x%x device 0x%x Func 0 "
			"(broken BIOS?)\n",
			PCI_VENDOR_ID_INTEL,
			PCI_DEVICE_ID_INTEL_I7300_MCH_FB1);
		goto error;
	}

	return 0;

error:
	i7300_put_devices(mci);
	return -ENODEV;
}

/**
 * i7300_init_one() - Probe for one instance of the device
 * @pdev: struct pci_dev pointer
 * @id: struct pci_device_id pointer - currently unused
 */
static int i7300_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer layers[3];
	struct i7300_pvt *pvt;
	int rc;

	/* wake up device */
	rc = pci_enable_device(pdev);
	if (rc == -EIO)
		return rc;

	edac_dbg(0, "MC: pdev bus %u dev=0x%x fn=0x%x\n",
		 pdev->bus->number,
		 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));

	/* We are only looking for func 0 of the set */
	if (PCI_FUNC(pdev->devfn) != 0)
		return -ENODEV;

	/* allocate a new MC control structure */
	layers[0].type = EDAC_MC_LAYER_BRANCH;
	layers[0].size = MAX_BRANCHES;
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_CHANNEL;
	layers[1].size = MAX_CH_PER_BRANCH;
	layers[1].is_virt_csrow = true;
	layers[2].type = EDAC_MC_LAYER_SLOT;
	layers[2].size = MAX_SLOTS;
	layers[2].is_virt_csrow = true;
	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
	if (mci == NULL)
		return -ENOMEM;

	edac_dbg(0, "MC: mci = %p\n", mci);

	mci->pdev = &pdev->dev;	/* record ptr to the generic device */

	pvt = mci->pvt_info;
	pvt->pci_dev_16_0_fsb_ctlr = pdev;	/* Record this device in our private */

	pvt->tmp_prt_buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pvt->tmp_prt_buffer) {
		edac_mc_free(mci);
		return -ENOMEM;
	}

	/* 'get' the pci devices we want to reserve for our use */
	if (i7300_get_devices(mci))
		goto fail0;

	mci->mc_idx = 0;
	mci->mtype_cap = MEM_FLAG_FB_DDR2;
	mci->edac_ctl_cap = EDAC_FLAG_NONE;
	mci->edac_cap = EDAC_FLAG_NONE;
	mci->mod_name = "i7300_edac.c";
	mci->ctl_name = i7300_devs[0].ctl_name;
	mci->dev_name = pci_name(pdev);
	mci->ctl_page_to_phys = NULL;

	/* Set the function pointer to an actual operation function */
	mci->edac_check = i7300_check_error;

	/* initialize the MC control structure 'csrows' table
	 * with the mapping and control information */
	if (i7300_get_mc_regs(mci)) {
		edac_dbg(0, "MC: Setting mci->edac_cap to EDAC_FLAG_NONE because i7300_init_csrows() returned nonzero value\n");
		mci->edac_cap = EDAC_FLAG_NONE;	/* no csrows found */
	} else {
		edac_dbg(1, "MC: Enable error reporting now\n");
		i7300_enable_error_reporting(mci);
	}

	/* add this new MC control structure to EDAC's list of MCs */
	if (edac_mc_add_mc(mci)) {
		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
		/* FIXME: perhaps some code should go here that disables error
		 * reporting if we just enabled it
		 */
		goto fail1;
	}

	i7300_clear_error(mci);

	/* allocating generic PCI control info */
	i7300_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR);
	if (!i7300_pci) {
		printk(KERN_WARNING
			"%s(): Unable to create PCI control\n",
			__func__);
		printk(KERN_WARNING
			"%s(): PCI error report via EDAC not setup\n",
			__func__);
	}

	return 0;

	/* Error exit unwinding stack */
fail1:

	i7300_put_devices(mci);

fail0:
	kfree(pvt->tmp_prt_buffer);
	edac_mc_free(mci);
	return -ENODEV;
}

/**
 * i7300_remove_one() - Remove the driver
 * @pdev: struct pci_dev pointer
 */
static void i7300_remove_one(struct pci_dev *pdev)
{
	struct mem_ctl_info *mci;
	char *tmp;

	edac_dbg(0, "\n");

	if (i7300_pci)
		edac_pci_release_generic_ctl(i7300_pci);

	mci = edac_mc_del_mc(&pdev->dev);
	if (!mci)
		return;

	tmp = ((struct i7300_pvt *)mci->pvt_info)->tmp_prt_buffer;

	/* retrieve references to resources, and free those resources */
	i7300_put_devices(mci);

	kfree(tmp);
	edac_mc_free(mci);
}

/*
 * pci_device_id: table of devices we are looking for
 *
 * Has only the 8086:360c PCI ID
 */
static const struct pci_device_id i7300_pci_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I7300_MCH_ERR)},
	{0,}			/* 0 terminated list. */
};

MODULE_DEVICE_TABLE(pci, i7300_pci_tbl);

/*
 * i7300_driver: pci_driver structure for this module
 */
static struct pci_driver i7300_driver = {
	.name = "i7300_edac",
	.probe = i7300_init_one,
	.remove = i7300_remove_one,
	.id_table = i7300_pci_tbl,
};

/**
 * i7300_init() - Registers the driver
 */
static int __init i7300_init(void)
{
	int pci_rc;

	edac_dbg(2, "\n");

	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
	opstate_init();

	pci_rc = pci_register_driver(&i7300_driver);

	return (pci_rc < 0) ? pci_rc : 0;
}

/**
 * i7300_exit() - Unregisters the driver
 */
static void __exit i7300_exit(void)
{
	edac_dbg(2, "\n");
	pci_unregister_driver(&i7300_driver);
}

module_init(i7300_init);
module_exit(i7300_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mauro Carvalho Chehab");
MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
MODULE_DESCRIPTION("MC Driver for Intel I7300 memory controllers - "
		   I7300_REVISION);

module_param(edac_op_state, int, 0444);
MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");