xref: /OK3568_Linux_fs/kernel/drivers/edac/sb_edac.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/* Intel Sandy Bridge -EN/-EP/-EX Memory Controller kernel module
 *
 * This driver supports the memory controllers found on the Intel
 * processor family Sandy Bridge.
 *
 * Copyright (c) 2011 by:
 *	 Mauro Carvalho Chehab
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/edac.h>
#include <linux/mmzone.h>
#include <linux/smp.h>
#include <linux/bitmap.h>
#include <linux/math64.h>
#include <linux/mod_devicetable.h>
#include <asm/cpu_device_id.h>
#include <asm/intel-family.h>
#include <asm/processor.h>
#include <asm/mce.h>

#include "edac_module.h"

/* Static vars */
static LIST_HEAD(sbridge_edac_list);

/*
 * Alter this version for the module when modifications are made
 */
#define SBRIDGE_REVISION    " Ver: 1.1.2 "
#define EDAC_MOD_STR	    "sb_edac"

/*
 * Debug macros
 */
#define sbridge_printk(level, fmt, arg...)			\
	edac_printk(level, "sbridge", fmt, ##arg)

#define sbridge_mc_printk(mci, level, fmt, arg...)		\
	edac_mc_chipset_printk(mci, level, "sbridge", fmt, ##arg)

/*
 * Get a bit field at register value <v>, from bit <lo> to bit <hi>
 */
#define GET_BITFIELD(v, lo, hi)	\
	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
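/* For example, GET_BITFIELD(0xabcd, 4, 7) extracts bits 7:4 and yields 0xc. */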

/* Devices 12 Function 6, Offsets 0x80 to 0xcc */
static const u32 sbridge_dram_rule[] = {
	0x80, 0x88, 0x90, 0x98, 0xa0,
	0xa8, 0xb0, 0xb8, 0xc0, 0xc8,
};

static const u32 ibridge_dram_rule[] = {
	0x60, 0x68, 0x70, 0x78, 0x80,
	0x88, 0x90, 0x98, 0xa0, 0xa8,
	0xb0, 0xb8, 0xc0, 0xc8, 0xd0,
	0xd8, 0xe0, 0xe8, 0xf0, 0xf8,
};

static const u32 knl_dram_rule[] = {
	0x60, 0x68, 0x70, 0x78, 0x80, /* 0-4 */
	0x88, 0x90, 0x98, 0xa0, 0xa8, /* 5-9 */
	0xb0, 0xb8, 0xc0, 0xc8, 0xd0, /* 10-14 */
	0xd8, 0xe0, 0xe8, 0xf0, 0xf8, /* 15-19 */
	0x100, 0x108, 0x110, 0x118,   /* 20-23 */
};

#define DRAM_RULE_ENABLE(reg)	GET_BITFIELD(reg, 0,  0)
#define A7MODE(reg)		GET_BITFIELD(reg, 26, 26)

static char *show_dram_attr(u32 attr)
{
	switch (attr) {
		case 0:
			return "DRAM";
		case 1:
			return "MMCFG";
		case 2:
			return "NXM";
		default:
			return "unknown";
	}
}

static const u32 sbridge_interleave_list[] = {
	0x84, 0x8c, 0x94, 0x9c, 0xa4,
	0xac, 0xb4, 0xbc, 0xc4, 0xcc,
};

static const u32 ibridge_interleave_list[] = {
	0x64, 0x6c, 0x74, 0x7c, 0x84,
	0x8c, 0x94, 0x9c, 0xa4, 0xac,
	0xb4, 0xbc, 0xc4, 0xcc, 0xd4,
	0xdc, 0xe4, 0xec, 0xf4, 0xfc,
};

static const u32 knl_interleave_list[] = {
	0x64, 0x6c, 0x74, 0x7c, 0x84, /* 0-4 */
	0x8c, 0x94, 0x9c, 0xa4, 0xac, /* 5-9 */
	0xb4, 0xbc, 0xc4, 0xcc, 0xd4, /* 10-14 */
	0xdc, 0xe4, 0xec, 0xf4, 0xfc, /* 15-19 */
	0x104, 0x10c, 0x114, 0x11c,   /* 20-23 */
};
#define MAX_INTERLEAVE							\
	(max_t(unsigned int, ARRAY_SIZE(sbridge_interleave_list),	\
	       max_t(unsigned int, ARRAY_SIZE(ibridge_interleave_list),	\
		     ARRAY_SIZE(knl_interleave_list))))
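/* With the tables above, MAX_INTERLEAVE evaluates to 24: the KNL list is the longest. */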

struct interleave_pkg {
	unsigned char start;
	unsigned char end;
};

static const struct interleave_pkg sbridge_interleave_pkg[] = {
	{ 0, 2 },
	{ 3, 5 },
	{ 8, 10 },
	{ 11, 13 },
	{ 16, 18 },
	{ 19, 21 },
	{ 24, 26 },
	{ 27, 29 },
};

static const struct interleave_pkg ibridge_interleave_pkg[] = {
	{ 0, 3 },
	{ 4, 7 },
	{ 8, 11 },
	{ 12, 15 },
	{ 16, 19 },
	{ 20, 23 },
	{ 24, 27 },
	{ 28, 31 },
};

static inline int sad_pkg(const struct interleave_pkg *table, u32 reg,
			  int interleave)
{
	return GET_BITFIELD(reg, table[interleave].start,
			    table[interleave].end);
}
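/*
 * For example, with sbridge_interleave_pkg, sad_pkg(table, reg, 1)
 * returns bits 5:3 of the interleave register.
 */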

/* Devices 12 Function 7 */

#define TOLM		0x80
#define TOHM		0x84
#define HASWELL_TOLM	0xd0
#define HASWELL_TOHM_0	0xd4
#define HASWELL_TOHM_1	0xd8
#define KNL_TOLM	0xd0
#define KNL_TOHM_0	0xd4
#define KNL_TOHM_1	0xd8

#define GET_TOLM(reg)		((GET_BITFIELD(reg, 0,  3) << 28) | 0x3ffffff)
#define GET_TOHM(reg)		((GET_BITFIELD(reg, 0, 20) << 25) | 0x3ffffff)
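/*
 * Both macros yield the last byte of the decoded range: the register field
 * is shifted into the high address bits and the low 26 bits are filled with
 * ones. Note that GET_TOLM() shifts by 28 but ORs in only 26 one-bits, so
 * bits 27:26 of its result stay clear.
 */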

/* Device 13 Function 6 */

#define SAD_TARGET	0xf0

#define SOURCE_ID(reg)		GET_BITFIELD(reg, 9, 11)

#define SOURCE_ID_KNL(reg)	GET_BITFIELD(reg, 12, 14)

#define SAD_CONTROL	0xf4

/* Device 14 function 0 */

static const u32 tad_dram_rule[] = {
	0x40, 0x44, 0x48, 0x4c,
	0x50, 0x54, 0x58, 0x5c,
	0x60, 0x64, 0x68, 0x6c,
};
#define MAX_TAD	ARRAY_SIZE(tad_dram_rule)

#define TAD_LIMIT(reg)		((GET_BITFIELD(reg, 12, 31) << 26) | 0x3ffffff)
#define TAD_SOCK(reg)		GET_BITFIELD(reg, 10, 11)
#define TAD_CH(reg)		GET_BITFIELD(reg,  8,  9)
#define TAD_TGT3(reg)		GET_BITFIELD(reg,  6,  7)
#define TAD_TGT2(reg)		GET_BITFIELD(reg,  4,  5)
#define TAD_TGT1(reg)		GET_BITFIELD(reg,  2,  3)
#define TAD_TGT0(reg)		GET_BITFIELD(reg,  0,  1)
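/*
 * Summarizing the macros above, a TAD rule register is laid out as
 * limit[31:12], socket ways[11:10], channel ways[9:8] and four 2-bit
 * channel targets tgt3..tgt0 in bits [7:0].
 */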

/* Device 15, function 0 */

#define MCMTR			0x7c
#define KNL_MCMTR		0x624

#define IS_ECC_ENABLED(mcmtr)		GET_BITFIELD(mcmtr, 2, 2)
#define IS_LOCKSTEP_ENABLED(mcmtr)	GET_BITFIELD(mcmtr, 1, 1)
#define IS_CLOSE_PG(mcmtr)		GET_BITFIELD(mcmtr, 0, 0)

/* Device 15, function 1 */

#define RASENABLES		0xac
#define IS_MIRROR_ENABLED(reg)		GET_BITFIELD(reg, 0, 0)

/* Device 15, functions 2-5 */

static const int mtr_regs[] = {
	0x80, 0x84, 0x88,
};

static const int knl_mtr_reg = 0xb60;

#define RANK_DISABLE(mtr)		GET_BITFIELD(mtr, 16, 19)
#define IS_DIMM_PRESENT(mtr)		GET_BITFIELD(mtr, 14, 14)
#define RANK_CNT_BITS(mtr)		GET_BITFIELD(mtr, 12, 13)
#define RANK_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 2, 4)
#define COL_WIDTH_BITS(mtr)		GET_BITFIELD(mtr, 0, 1)
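/*
 * numrank()/numrow()/numcol() below interpret these fields as
 * (1 << RANK_CNT_BITS) ranks, (RANK_WIDTH_BITS + 12) row address bits
 * and (COL_WIDTH_BITS + 10) column address bits.
 */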

static const u32 tad_ch_nilv_offset[] = {
	0x90, 0x94, 0x98, 0x9c,
	0xa0, 0xa4, 0xa8, 0xac,
	0xb0, 0xb4, 0xb8, 0xbc,
};
#define CHN_IDX_OFFSET(reg)		GET_BITFIELD(reg, 28, 29)
#define TAD_OFFSET(reg)			(GET_BITFIELD(reg,  6, 25) << 26)

static const u32 rir_way_limit[] = {
	0x108, 0x10c, 0x110, 0x114, 0x118,
};
#define MAX_RIR_RANGES ARRAY_SIZE(rir_way_limit)

#define IS_RIR_VALID(reg)	GET_BITFIELD(reg, 31, 31)
#define RIR_WAY(reg)		GET_BITFIELD(reg, 28, 29)

#define MAX_RIR_WAY	8

static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
	{ 0x120, 0x124, 0x128, 0x12c, 0x130, 0x134, 0x138, 0x13c },
	{ 0x140, 0x144, 0x148, 0x14c, 0x150, 0x154, 0x158, 0x15c },
	{ 0x160, 0x164, 0x168, 0x16c, 0x170, 0x174, 0x178, 0x17c },
	{ 0x180, 0x184, 0x188, 0x18c, 0x190, 0x194, 0x198, 0x19c },
	{ 0x1a0, 0x1a4, 0x1a8, 0x1ac, 0x1b0, 0x1b4, 0x1b8, 0x1bc },
};
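/*
 * rir_offset[range][way] is the config offset of the per-way interleave
 * register, e.g. RIR range 1, way 2 lives at offset 0x148.
 */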

#define RIR_RNK_TGT(type, reg) (((type) == BROADWELL) ? \
	GET_BITFIELD(reg, 20, 23) : GET_BITFIELD(reg, 16, 19))

#define RIR_OFFSET(type, reg) (((type) == HASWELL || (type) == BROADWELL) ? \
	GET_BITFIELD(reg,  2, 15) : GET_BITFIELD(reg,  2, 14))

/* Device 16, functions 2-7 */

/*
 * FIXME: Implement the error count reads directly
 */

#define RANK_ODD_OV(reg)		GET_BITFIELD(reg, 31, 31)
#define RANK_ODD_ERR_CNT(reg)		GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_OV(reg)		GET_BITFIELD(reg, 15, 15)
#define RANK_EVEN_ERR_CNT(reg)		GET_BITFIELD(reg,  0, 14)
#if 0 /* Currently unused */
static const u32 correrrcnt[] = {
	0x104, 0x108, 0x10c, 0x110,
};

static const u32 correrrthrsld[] = {
	0x11c, 0x120, 0x124, 0x128,
};
#endif

#define RANK_ODD_ERR_THRSLD(reg)	GET_BITFIELD(reg, 16, 30)
#define RANK_EVEN_ERR_THRSLD(reg)	GET_BITFIELD(reg,  0, 14)


/* Device 17, function 0 */

#define SB_RANK_CFG_A		0x0328

#define IB_RANK_CFG_A		0x0320

/*
 * sbridge structs
 */

#define NUM_CHANNELS		6	/* Max channels per MC */
#define MAX_DIMMS		3	/* Max DIMMS per channel */
#define KNL_MAX_CHAS		38	/* KNL max num. of Cache Home Agents */
#define KNL_MAX_CHANNELS	6	/* KNL max num. of PCI channels */
#define KNL_MAX_EDCS		8	/* Embedded DRAM controllers */
#define CHANNEL_UNSPECIFIED	0xf	/* Intel IA32 SDM 15-14 */

enum type {
	SANDY_BRIDGE,
	IVY_BRIDGE,
	HASWELL,
	BROADWELL,
	KNIGHTS_LANDING,
};

enum domain {
	IMC0 = 0,
	IMC1,
	SOCK,
};

enum mirroring_mode {
	NON_MIRRORING,
	ADDR_RANGE_MIRRORING,
	FULL_MIRRORING,
};

struct sbridge_pvt;
struct sbridge_info {
	enum type	type;
	u32		mcmtr;
	u32		rankcfgr;
	u64		(*get_tolm)(struct sbridge_pvt *pvt);
	u64		(*get_tohm)(struct sbridge_pvt *pvt);
	u64		(*rir_limit)(u32 reg);
	u64		(*sad_limit)(u32 reg);
	u32		(*interleave_mode)(u32 reg);
	u32		(*dram_attr)(u32 reg);
	const u32	*dram_rule;
	const u32	*interleave_list;
	const struct interleave_pkg *interleave_pkg;
	u8		max_sad;
	u8		(*get_node_id)(struct sbridge_pvt *pvt);
	u8		(*get_ha)(u8 bank);
	enum mem_type	(*get_memory_type)(struct sbridge_pvt *pvt);
	enum dev_type	(*get_width)(struct sbridge_pvt *pvt, u32 mtr);
	struct pci_dev	*pci_vtd;
};

struct sbridge_channel {
	u32		ranks;
	u32		dimms;
};

struct pci_id_descr {
	int			dev_id;
	int			optional;
	enum domain		dom;
};

struct pci_id_table {
	const struct pci_id_descr	*descr;
	int				n_devs_per_imc;
	int				n_devs_per_sock;
	int				n_imcs_per_sock;
	enum type			type;
};

struct sbridge_dev {
	struct list_head	list;
	int			seg;
	u8			bus, mc;
	u8			node_id, source_id;
	struct pci_dev		**pdev;
	enum domain		dom;
	int			n_devs;
	int			i_devs;
	struct mem_ctl_info	*mci;
};

struct knl_pvt {
	struct pci_dev          *pci_cha[KNL_MAX_CHAS];
	struct pci_dev          *pci_channel[KNL_MAX_CHANNELS];
	struct pci_dev          *pci_mc0;
	struct pci_dev          *pci_mc1;
	struct pci_dev          *pci_mc0_misc;
	struct pci_dev          *pci_mc1_misc;
	struct pci_dev          *pci_mc_info; /* tolm, tohm */
};

struct sbridge_pvt {
	/* Devices per socket */
	struct pci_dev		*pci_ddrio;
	struct pci_dev		*pci_sad0, *pci_sad1;
	struct pci_dev		*pci_br0, *pci_br1;
	/* Devices per memory controller */
	struct pci_dev		*pci_ha, *pci_ta, *pci_ras;
	struct pci_dev		*pci_tad[NUM_CHANNELS];

	struct sbridge_dev	*sbridge_dev;

	struct sbridge_info	info;
	struct sbridge_channel	channel[NUM_CHANNELS];

	/* Memory type detection */
	bool			is_cur_addr_mirrored, is_lockstep, is_close_pg;
	bool			is_chan_hash;
	enum mirroring_mode	mirror_mode;

	/* Memory description */
	u64			tolm, tohm;
	struct knl_pvt knl;
};

#define PCI_DESCR(device_id, opt, domain)	\
	.dev_id = (device_id),		\
	.optional = opt,	\
	.dom = domain

static const struct pci_id_descr pci_dev_descr_sbridge[] = {
		/* Processor Home Agent */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0,   0, IMC0) },

		/* Memory controller */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA,    0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0,  0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1,  0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2,  0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3,  0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO, 1, SOCK) },

		/* System Address Decoder */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0,      0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1,      0, SOCK) },

		/* Broadcast Registers */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_SBRIDGE_BR,        0, SOCK) },
};

#define PCI_ID_TABLE_ENTRY(A, N, M, T) {	\
	.descr = A,			\
	.n_devs_per_imc = N,	\
	.n_devs_per_sock = ARRAY_SIZE(A),	\
	.n_imcs_per_sock = M,	\
	.type = T			\
}
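/*
 * N is the number of devices per IMC, while n_devs_per_sock is always
 * the full size of the descriptor table A.
 */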

static const struct pci_id_table pci_dev_descr_sbridge_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_sbridge, ARRAY_SIZE(pci_dev_descr_sbridge), 1, SANDY_BRIDGE),
	{0,}			/* 0 terminated list. */
};

/* The DDRIO0 device ID depends on whether the part has 1HA or 2HA:
 * 1HA:
 *	0x0eb8 (17.0) is DDRIO0
 * 2HA:
 *	0x0ebc (17.4) is DDRIO0
 */
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0	0x0eb8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0	0x0ebc

/* pci ids */
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0		0x0ea0
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA		0x0ea8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS		0x0e71
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0	0x0eaa
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1	0x0eab
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2	0x0eac
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3	0x0ead
#define PCI_DEVICE_ID_INTEL_IBRIDGE_SAD			0x0ec8
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR0			0x0ec9
#define PCI_DEVICE_ID_INTEL_IBRIDGE_BR1			0x0eca
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1		0x0e60
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA		0x0e68
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS		0x0e79
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0	0x0e6a
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1	0x0e6b
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2	0x0e6c
#define PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3	0x0e6d

static const struct pci_id_descr pci_dev_descr_ibridge[] = {
		/* Processor Home Agent */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0,        0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1,        1, IMC1) },

		/* Memory controller */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA,     0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS,    0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3,   0, IMC0) },

		/* Optional, mode 2HA */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA,     1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS,    1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3,   1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0, 1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0, 1, SOCK) },

		/* System Address Decoder */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_SAD,            0, SOCK) },

		/* Broadcast Registers */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR0,            1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_BR1,            0, SOCK) },

};

static const struct pci_id_table pci_dev_descr_ibridge_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_ibridge, 12, 2, IVY_BRIDGE),
	{0,}			/* 0 terminated list. */
};

/* Haswell support */
/* EN processor:
 *	- 1 IMC
 *	- 3 DDR3 channels, 2 DPC per channel
 * EP processor:
 *	- 1 or 2 IMCs
 *	- 4 DDR4 channels, 3 DPC per channel
 * EP 4S processor:
 *	- 2 IMCs
 *	- 4 DDR4 channels, 3 DPC per channel
 * EX processor:
 *	- 2 IMCs
 *	- each IMC interfaces with an SMI 2 channel
 *	- each SMI channel interfaces with a scalable memory buffer
 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 */
#define HASWELL_DDRCRCLKCONTROLS 0xa10 /* Ditto on Broadwell */
#define HASWELL_HASYSDEFEATURE2 0x84
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC 0x2f28
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0	0x2fa0
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1	0x2f60
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA	0x2fa8
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM	0x2f71
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA	0x2f68
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM	0x2f79
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0 0x2ffc
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1 0x2ffd
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0 0x2faa
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1 0x2fab
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2 0x2fac
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3 0x2fad
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0 0x2f6a
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1 0x2f6b
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2 0x2f6c
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3 0x2f6d
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0 0x2fbd
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1 0x2fbf
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2 0x2fb9
#define PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3 0x2fbb
static const struct pci_id_descr pci_dev_descr_haswell[] = {
	/* first item must be the HA */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0,      0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1,      1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2, 1, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3, 1, IMC0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3, 1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0, 0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1, 0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0,   1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1,   1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2,   1, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3,   1, SOCK) },
};

static const struct pci_id_table pci_dev_descr_haswell_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_haswell, 13, 2, HASWELL),
	{0,}			/* 0 terminated list. */
};

/* Knight's Landing Support */
/*
 * KNL's memory channels are swizzled between memory controllers.
 * MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
 */
#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
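/* e.g. knl_channel_remap(0, 1) == 4 (MC0/CH1 -> CH4), knl_channel_remap(1, 1) == 1 */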

/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC       0x7840
/* DRAM channel stuff; bank addrs, dimmmtr, etc. - 2-8-2 - 2-9-4 (6 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN     0x7843
/* kdrwdbu TAD limits/offsets, MCMTR - 2-10-1, 2-11-1 (2 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TA       0x7844
/* CHA broadcast registers, dram rules - 1-29-0 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0     0x782a
/* SAD target - 1-29-1 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1     0x782b
/* Caching / Home Agent */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_CHA      0x782c
/* Device with TOLM and TOHM, 0-5-0 (1 of these) */
#define PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM    0x7810

/*
 * KNL differs from SB, IB, and Haswell in that it has multiple
 * instances of the same device with the same device ID, so we handle that
 * by creating as many copies in the table as we expect to find.
 * (Entries with the same device ID must be grouped together.)
 */

static const struct pci_id_descr pci_dev_descr_knl[] = {
	[0 ... 1]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_MC,    0, IMC0)},
	[2 ... 7]   = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN,  0, IMC0) },
	[8]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TA,    0, IMC0) },
	[9]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM, 0, IMC0) },
	[10]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0,  0, SOCK) },
	[11]	    = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1,  0, SOCK) },
	[12 ... 49] = { PCI_DESCR(PCI_DEVICE_ID_INTEL_KNL_IMC_CHA,   0, SOCK) },
};

static const struct pci_id_table pci_dev_descr_knl_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_knl, ARRAY_SIZE(pci_dev_descr_knl), 1, KNIGHTS_LANDING),
	{0,}
};

/*
 * Broadwell support
 *
 * DE processor:
 *	- 1 IMC
 *	- 2 DDR3 channels, 2 DPC per channel
 * EP processor:
 *	- 1 or 2 IMCs
 *	- 4 DDR4 channels, 3 DPC per channel
 * EP 4S processor:
 *	- 2 IMCs
 *	- 4 DDR4 channels, 3 DPC per channel
 * EX processor:
 *	- 2 IMCs
 *	- each IMC interfaces with an SMI 2 channel
 *	- each SMI channel interfaces with a scalable memory buffer
 *	- each scalable memory buffer supports 4 DDR3/DDR4 channels, 3 DPC
 */
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC 0x6f28
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0	0x6fa0
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1	0x6f60
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA	0x6fa8
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM	0x6f71
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA	0x6f68
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM	0x6f79
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0 0x6ffc
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1 0x6ffd
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0 0x6faa
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1 0x6fab
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2 0x6fac
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3 0x6fad
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0 0x6f6a
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1 0x6f6b
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2 0x6f6c
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3 0x6f6d
#define PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0 0x6faf

static const struct pci_id_descr pci_dev_descr_broadwell[] = {
	/* first item must be the HA */
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0,      0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1,      1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM,   0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1, 0, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2, 1, IMC0) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3, 1, IMC0) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM,   1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2, 1, IMC1) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3, 1, IMC1) },

	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0, 0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1, 0, SOCK) },
	{ PCI_DESCR(PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0,   1, SOCK) },
};

static const struct pci_id_table pci_dev_descr_broadwell_table[] = {
	PCI_ID_TABLE_ENTRY(pci_dev_descr_broadwell, 10, 2, BROADWELL),
	{0,}			/* 0 terminated list. */
};


/****************************************************************************
			Ancillary status routines
 ****************************************************************************/

static inline int numrank(enum type type, u32 mtr)
{
	int ranks = (1 << RANK_CNT_BITS(mtr));
	int max = 4;

	if (type == HASWELL || type == BROADWELL || type == KNIGHTS_LANDING)
		max = 8;

	if (ranks > max) {
		edac_dbg(0, "Invalid number of ranks: %d (max = %i) raw value = %x (%04x)\n",
			 ranks, max, (unsigned int)RANK_CNT_BITS(mtr), mtr);
		return -EINVAL;
	}

	return ranks;
}

static inline int numrow(u32 mtr)
{
	int rows = (RANK_WIDTH_BITS(mtr) + 12);

	if (rows < 13 || rows > 18) {
		edac_dbg(0, "Invalid number of rows: %d (should be between 13 and 18) raw value = %x (%04x)\n",
			 rows, (unsigned int)RANK_WIDTH_BITS(mtr), mtr);
		return -EINVAL;
	}

	return 1 << rows;
}

static inline int numcol(u32 mtr)
{
	int cols = (COL_WIDTH_BITS(mtr) + 10);

	if (cols > 12) {
		edac_dbg(0, "Invalid number of cols: %d (max = 12) raw value = %x (%04x)\n",
			 cols, (unsigned int)COL_WIDTH_BITS(mtr), mtr);
		return -EINVAL;
	}

	return 1 << cols;
}

static struct sbridge_dev *get_sbridge_dev(int seg, u8 bus, enum domain dom,
					   int multi_bus,
					   struct sbridge_dev *prev)
{
	struct sbridge_dev *sbridge_dev;

	/*
	 * If we have devices scattered across several busses that pertain
	 * to the same memory controller, we'll lump them all together.
	 */
	if (multi_bus) {
		return list_first_entry_or_null(&sbridge_edac_list,
				struct sbridge_dev, list);
	}

	sbridge_dev = list_entry(prev ? prev->list.next
				      : sbridge_edac_list.next, struct sbridge_dev, list);

	list_for_each_entry_from(sbridge_dev, &sbridge_edac_list, list) {
		if ((sbridge_dev->seg == seg) && (sbridge_dev->bus == bus) &&
				(dom == SOCK || dom == sbridge_dev->dom))
			return sbridge_dev;
	}

	return NULL;
}

static struct sbridge_dev *alloc_sbridge_dev(int seg, u8 bus, enum domain dom,
					     const struct pci_id_table *table)
{
	struct sbridge_dev *sbridge_dev;

	sbridge_dev = kzalloc(sizeof(*sbridge_dev), GFP_KERNEL);
	if (!sbridge_dev)
		return NULL;

	sbridge_dev->pdev = kcalloc(table->n_devs_per_imc,
				    sizeof(*sbridge_dev->pdev),
				    GFP_KERNEL);
	if (!sbridge_dev->pdev) {
		kfree(sbridge_dev);
		return NULL;
	}

	sbridge_dev->seg = seg;
	sbridge_dev->bus = bus;
	sbridge_dev->dom = dom;
	sbridge_dev->n_devs = table->n_devs_per_imc;
	list_add_tail(&sbridge_dev->list, &sbridge_edac_list);

	return sbridge_dev;
}

static void free_sbridge_dev(struct sbridge_dev *sbridge_dev)
{
	list_del(&sbridge_dev->list);
	kfree(sbridge_dev->pdev);
	kfree(sbridge_dev);
}

static u64 sbridge_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	/* Address range is 32:28 */
	pci_read_config_dword(pvt->pci_sad1, TOLM, &reg);
	return GET_TOLM(reg);
}

static u64 sbridge_get_tohm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, TOHM, &reg);
	return GET_TOHM(reg);
}

static u64 ibridge_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br1, TOLM, &reg);

	return GET_TOLM(reg);
}

static u64 ibridge_get_tohm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br1, TOHM, &reg);

	return GET_TOHM(reg);
}

static u64 rir_limit(u32 reg)
{
	return ((u64)GET_BITFIELD(reg,  1, 10) << 29) | 0x1fffffff;
}
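/* i.e. the RIR limit field is in 512MB (1 << 29) units; the low 29 bits are filled with ones. */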

static u64 sad_limit(u32 reg)
{
	return (GET_BITFIELD(reg, 6, 25) << 26) | 0x3ffffff;
}

static u32 interleave_mode(u32 reg)
{
	return GET_BITFIELD(reg, 1, 1);
}

static u32 dram_attr(u32 reg)
{
	return GET_BITFIELD(reg, 2, 3);
}

static u64 knl_sad_limit(u32 reg)
{
	return (GET_BITFIELD(reg, 7, 26) << 26) | 0x3ffffff;
}

static u32 knl_interleave_mode(u32 reg)
{
	return GET_BITFIELD(reg, 1, 2);
}

static const char * const knl_intlv_mode[] = {
	"[8:6]", "[10:8]", "[14:12]", "[32:30]"
};

static const char *get_intlv_mode_str(u32 reg, enum type t)
{
	if (t == KNIGHTS_LANDING)
		return knl_intlv_mode[knl_interleave_mode(reg)];
	else
		return interleave_mode(reg) ? "[8:6]" : "[8:6]XOR[18:16]";
}

static u32 dram_attr_knl(u32 reg)
{
	return GET_BITFIELD(reg, 3, 4);
}


static enum mem_type get_memory_type(struct sbridge_pvt *pvt)
{
	u32 reg;
	enum mem_type mtype;

	if (pvt->pci_ddrio) {
		pci_read_config_dword(pvt->pci_ddrio, pvt->info.rankcfgr,
				      &reg);
		if (GET_BITFIELD(reg, 11, 11))
			/* FIXME: Can also be LRDIMM */
			mtype = MEM_RDDR3;
		else
			mtype = MEM_DDR3;
	} else
		mtype = MEM_UNKNOWN;

	return mtype;
}

static enum mem_type haswell_get_memory_type(struct sbridge_pvt *pvt)
{
	u32 reg;
	bool registered = false;
	enum mem_type mtype = MEM_UNKNOWN;

	if (!pvt->pci_ddrio)
		goto out;

	pci_read_config_dword(pvt->pci_ddrio,
			      HASWELL_DDRCRCLKCONTROLS, &reg);
	/* Is_Rdimm */
	if (GET_BITFIELD(reg, 16, 16))
		registered = true;

	pci_read_config_dword(pvt->pci_ta, MCMTR, &reg);
	if (GET_BITFIELD(reg, 14, 14)) {
		if (registered)
			mtype = MEM_RDDR4;
		else
			mtype = MEM_DDR4;
	} else {
		if (registered)
			mtype = MEM_RDDR3;
		else
			mtype = MEM_DDR3;
	}

out:
	return mtype;
}

static enum dev_type knl_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	/* for KNL value is fixed */
	return DEV_X16;
}

static enum dev_type sbridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	/* there's no way to figure out */
	return DEV_UNKNOWN;
}

static enum dev_type __ibridge_get_width(u32 mtr)
{
	enum dev_type type = DEV_UNKNOWN;

	switch (mtr) {
	case 2:
		type = DEV_X16;
		break;
	case 1:
		type = DEV_X8;
		break;
	case 0:
		type = DEV_X4;
		break;
	}

	return type;
}

static enum dev_type ibridge_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	/*
	 * ddr3_width on the documentation but also valid for DDR4 on
	 * Haswell
	 */
	return __ibridge_get_width(GET_BITFIELD(mtr, 7, 8));
}

static enum dev_type broadwell_get_width(struct sbridge_pvt *pvt, u32 mtr)
{
	/* ddr3_width on the documentation but also valid for DDR4 */
	return __ibridge_get_width(GET_BITFIELD(mtr, 8, 9));
}

static enum mem_type knl_get_memory_type(struct sbridge_pvt *pvt)
{
	/* DDR4 RDIMMS and LRDIMMS are supported */
	return MEM_RDDR4;
}

static u8 get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_br0, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 2);
}

static u8 haswell_get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 3);
}

static u8 knl_get_node_id(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->pci_sad1, SAD_CONTROL, &reg);
	return GET_BITFIELD(reg, 0, 2);
}

/*
 * Use the reporting bank number to determine which memory controller
 * (also known as the "ha", for "home agent") reported the error. Sandy
 * Bridge only has one memory controller per socket, so the answer is
 * always zero.
 */
static u8 sbridge_get_ha(u8 bank)
{
	return 0;
}

/*
 * On Ivy Bridge, Haswell and Broadwell the error may be in a
 * home agent bank (7, 8), or one of the per-channel memory
 * controller banks (9 .. 16).
 */
static u8 ibridge_get_ha(u8 bank)
{
	switch (bank) {
	case 7 ... 8:
		return bank - 7;
	case 9 ... 16:
		return (bank - 9) / 4;
	default:
		return 0xff;
	}
}

/* Not used, but included for safety/symmetry */
static u8 knl_get_ha(u8 bank)
{
	return 0xff;
}

static u64 haswell_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOLM, &reg);
	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
}

static u64 haswell_get_tohm(struct sbridge_pvt *pvt)
{
	u64 rc;
	u32 reg;

	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_0, &reg);
	rc = GET_BITFIELD(reg, 26, 31);
	pci_read_config_dword(pvt->info.pci_vtd, HASWELL_TOHM_1, &reg);
	rc = ((reg << 6) | rc) << 26;

	return rc | 0x3ffffff;
}

static u64 knl_get_tolm(struct sbridge_pvt *pvt)
{
	u32 reg;

	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOLM, &reg);
	return (GET_BITFIELD(reg, 26, 31) << 26) | 0x3ffffff;
}

static u64 knl_get_tohm(struct sbridge_pvt *pvt)
{
	u64 rc;
	u32 reg_lo, reg_hi;

	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_0, &reg_lo);
	pci_read_config_dword(pvt->knl.pci_mc_info, KNL_TOHM_1, &reg_hi);
	rc = ((u64)reg_hi << 32) | reg_lo;
	return rc | 0x3ffffff;
}


static u64 haswell_rir_limit(u32 reg)
{
	return (((u64)GET_BITFIELD(reg,  1, 11) + 1) << 29) - 1;
}

static inline u8 sad_pkg_socket(u8 pkg)
{
	/* on Ivy Bridge, nodeID is SASS, where A is HA and S is node id */
	return ((pkg >> 3) << 2) | (pkg & 0x3);
}

static inline u8 sad_pkg_ha(u8 pkg)
{
	return (pkg >> 2) & 0x1;
}
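/*
 * Example: a nodeID of 0b1101 (SASS) decodes via the helpers above to
 * socket 0b101 == 5 (sad_pkg_socket) and HA 1 (sad_pkg_ha).
 */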
1093*4882a593Smuzhiyun 
haswell_chan_hash(int idx,u64 addr)1094*4882a593Smuzhiyun static int haswell_chan_hash(int idx, u64 addr)
1095*4882a593Smuzhiyun {
1096*4882a593Smuzhiyun 	int i;
1097*4882a593Smuzhiyun 
1098*4882a593Smuzhiyun 	/*
1099*4882a593Smuzhiyun 	 * XOR even bits from 12:26 to bit0 of idx,
1100*4882a593Smuzhiyun 	 *     odd bits from 13:27 to bit1
1101*4882a593Smuzhiyun 	 */
1102*4882a593Smuzhiyun 	for (i = 12; i < 28; i += 2)
1103*4882a593Smuzhiyun 		idx ^= (addr >> i) & 3;
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 	return idx;
1106*4882a593Smuzhiyun }
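/*
 * Illustrative examples: haswell_chan_hash(0, 0x1000) = 1 (address bit
 * 12 folds into bit 0 of idx) and haswell_chan_hash(0, 0x2000) = 2
 * (bit 13 folds into bit 1). Each iteration XORs one 2-bit window of
 * the address into idx, so the result depends only on the parity of
 * the even and of the odd address bits within 12..27.
 */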
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun /* Low bits of TAD limit, and some metadata. */
1109*4882a593Smuzhiyun static const u32 knl_tad_dram_limit_lo[] = {
1110*4882a593Smuzhiyun 	0x400, 0x500, 0x600, 0x700,
1111*4882a593Smuzhiyun 	0x800, 0x900, 0xa00, 0xb00,
1112*4882a593Smuzhiyun };
1113*4882a593Smuzhiyun 
1114*4882a593Smuzhiyun /* Low bits of TAD offset. */
1115*4882a593Smuzhiyun static const u32 knl_tad_dram_offset_lo[] = {
1116*4882a593Smuzhiyun 	0x404, 0x504, 0x604, 0x704,
1117*4882a593Smuzhiyun 	0x804, 0x904, 0xa04, 0xb04,
1118*4882a593Smuzhiyun };
1119*4882a593Smuzhiyun 
1120*4882a593Smuzhiyun /* High 16 bits of TAD limit and offset. */
1121*4882a593Smuzhiyun static const u32 knl_tad_dram_hi[] = {
1122*4882a593Smuzhiyun 	0x408, 0x508, 0x608, 0x708,
1123*4882a593Smuzhiyun 	0x808, 0x908, 0xa08, 0xb08,
1124*4882a593Smuzhiyun };
1125*4882a593Smuzhiyun 
1126*4882a593Smuzhiyun /* Number of ways a tad entry is interleaved. */
1127*4882a593Smuzhiyun static const u32 knl_tad_ways[] = {
1128*4882a593Smuzhiyun 	8, 6, 4, 3, 2, 1,
1129*4882a593Smuzhiyun };
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun /*
1132*4882a593Smuzhiyun  * Retrieve the n'th Target Address Decode table entry
1133*4882a593Smuzhiyun  * from the memory controller's TAD table.
1134*4882a593Smuzhiyun  *
1135*4882a593Smuzhiyun  * @pvt:	driver private data
1136*4882a593Smuzhiyun  * @entry:	which entry you want to retrieve
1137*4882a593Smuzhiyun  * @mc:		which memory controller (0 or 1)
1138*4882a593Smuzhiyun  * @offset:	output tad range offset
1139*4882a593Smuzhiyun  * @limit:	output address of first byte above tad range
1140*4882a593Smuzhiyun  * @ways:	output number of interleave ways
1141*4882a593Smuzhiyun  *
1142*4882a593Smuzhiyun  * The offset value has curious semantics.  It's a sort of running total
1143*4882a593Smuzhiyun  * of the sizes of all the memory regions that aren't mapped in this
1144*4882a593Smuzhiyun  * tad table.
1145*4882a593Smuzhiyun  */
1146*4882a593Smuzhiyun static int knl_get_tad(const struct sbridge_pvt *pvt,
1147*4882a593Smuzhiyun 		const int entry,
1148*4882a593Smuzhiyun 		const int mc,
1149*4882a593Smuzhiyun 		u64 *offset,
1150*4882a593Smuzhiyun 		u64 *limit,
1151*4882a593Smuzhiyun 		int *ways)
1152*4882a593Smuzhiyun {
1153*4882a593Smuzhiyun 	u32 reg_limit_lo, reg_offset_lo, reg_hi;
1154*4882a593Smuzhiyun 	struct pci_dev *pci_mc;
1155*4882a593Smuzhiyun 	int way_id;
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 	switch (mc) {
1158*4882a593Smuzhiyun 	case 0:
1159*4882a593Smuzhiyun 		pci_mc = pvt->knl.pci_mc0;
1160*4882a593Smuzhiyun 		break;
1161*4882a593Smuzhiyun 	case 1:
1162*4882a593Smuzhiyun 		pci_mc = pvt->knl.pci_mc1;
1163*4882a593Smuzhiyun 		break;
1164*4882a593Smuzhiyun 	default:
1165*4882a593Smuzhiyun 		WARN_ON(1);
1166*4882a593Smuzhiyun 		return -EINVAL;
1167*4882a593Smuzhiyun 	}
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	pci_read_config_dword(pci_mc,
1170*4882a593Smuzhiyun 			knl_tad_dram_limit_lo[entry], &reg_limit_lo);
1171*4882a593Smuzhiyun 	pci_read_config_dword(pci_mc,
1172*4882a593Smuzhiyun 			knl_tad_dram_offset_lo[entry], &reg_offset_lo);
1173*4882a593Smuzhiyun 	pci_read_config_dword(pci_mc,
1174*4882a593Smuzhiyun 			knl_tad_dram_hi[entry], &reg_hi);
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun 	/* Is this TAD entry enabled? */
1177*4882a593Smuzhiyun 	if (!GET_BITFIELD(reg_limit_lo, 0, 0))
1178*4882a593Smuzhiyun 		return -ENODEV;
1179*4882a593Smuzhiyun 
1180*4882a593Smuzhiyun 	way_id = GET_BITFIELD(reg_limit_lo, 3, 5);
1181*4882a593Smuzhiyun 
1182*4882a593Smuzhiyun 	if (way_id < ARRAY_SIZE(knl_tad_ways)) {
1183*4882a593Smuzhiyun 		*ways = knl_tad_ways[way_id];
1184*4882a593Smuzhiyun 	} else {
1185*4882a593Smuzhiyun 		*ways = 0;
1186*4882a593Smuzhiyun 		sbridge_printk(KERN_ERR,
1187*4882a593Smuzhiyun 				"Unexpected value %d in mc_tad_limit_lo wayness field\n",
1188*4882a593Smuzhiyun 				way_id);
1189*4882a593Smuzhiyun 		return -ENODEV;
1190*4882a593Smuzhiyun 	}
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun 	/*
1193*4882a593Smuzhiyun 	 * The least significant 6 bits of base and limit are truncated.
1194*4882a593Smuzhiyun 	 * For limit, we fill the missing bits with 1s.
1195*4882a593Smuzhiyun 	 */
1196*4882a593Smuzhiyun 	*offset = ((u64) GET_BITFIELD(reg_offset_lo, 6, 31) << 6) |
1197*4882a593Smuzhiyun 				((u64) GET_BITFIELD(reg_hi, 0,  15) << 32);
1198*4882a593Smuzhiyun 	*limit = ((u64) GET_BITFIELD(reg_limit_lo,  6, 31) << 6) | 63 |
1199*4882a593Smuzhiyun 				((u64) GET_BITFIELD(reg_hi, 16, 31) << 32);
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	return 0;
1202*4882a593Smuzhiyun }
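/*
 * Illustrative decode using the function above: a TAD entry covering
 * 0..2GiB-1 with 4-way interleave would read reg_limit_lo = 0x7fffffd1
 * (enable = 1, wayness field = 2 so knl_tad_ways[2] = 4, limit bits
 * 31:6 = 0x1ffffff) and reg_hi = 0, reconstructing
 * *limit = (0x1ffffff << 6) | 63 = 0x7fffffff and *ways = 4.
 */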
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun /* Determine which memory controller is responsible for a given channel. */
1205*4882a593Smuzhiyun static int knl_channel_mc(int channel)
1206*4882a593Smuzhiyun {
1207*4882a593Smuzhiyun 	WARN_ON(channel < 0 || channel >= 6);
1208*4882a593Smuzhiyun 
1209*4882a593Smuzhiyun 	return channel < 3 ? 1 : 0;
1210*4882a593Smuzhiyun }
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun /*
1213*4882a593Smuzhiyun  * Get the Nth entry from EDC_ROUTE_TABLE register.
1214*4882a593Smuzhiyun  * (This is the per-tile mapping of logical interleave targets to
1215*4882a593Smuzhiyun  *  physical EDC modules.)
1216*4882a593Smuzhiyun  *
1217*4882a593Smuzhiyun  * entry 0: 0:2
1218*4882a593Smuzhiyun  *       1: 3:5
1219*4882a593Smuzhiyun  *       2: 6:8
1220*4882a593Smuzhiyun  *       3: 9:11
1221*4882a593Smuzhiyun  *       4: 12:14
1222*4882a593Smuzhiyun  *       5: 15:17
1223*4882a593Smuzhiyun  *       6: 18:20
1224*4882a593Smuzhiyun  *       7: 21:23
1225*4882a593Smuzhiyun  * reserved: 24:31
1226*4882a593Smuzhiyun  */
1227*4882a593Smuzhiyun static u32 knl_get_edc_route(int entry, u32 reg)
1228*4882a593Smuzhiyun {
1229*4882a593Smuzhiyun 	WARN_ON(entry >= KNL_MAX_EDCS);
1230*4882a593Smuzhiyun 	return GET_BITFIELD(reg, entry*3, (entry*3)+2);
1231*4882a593Smuzhiyun }
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun /*
1234*4882a593Smuzhiyun  * Get the Nth entry from MC_ROUTE_TABLE register.
1235*4882a593Smuzhiyun  * (This is the per-tile mapping of logical interleave targets to
1236*4882a593Smuzhiyun  *  physical DRAM channels.)
1237*4882a593Smuzhiyun  *
1238*4882a593Smuzhiyun  * entry 0: mc 0:2   channel 18:19
1239*4882a593Smuzhiyun  *       1: mc 3:5   channel 20:21
1240*4882a593Smuzhiyun  *       2: mc 6:8   channel 22:23
1241*4882a593Smuzhiyun  *       3: mc 9:11  channel 24:25
1242*4882a593Smuzhiyun  *       4: mc 12:14 channel 26:27
1243*4882a593Smuzhiyun  *       5: mc 15:17 channel 28:29
1244*4882a593Smuzhiyun  * reserved: 30:31
1245*4882a593Smuzhiyun  *
1246*4882a593Smuzhiyun  * Though we have 3 bits to identify the MC, we should only see
1247*4882a593Smuzhiyun  * the values 0 or 1.
1248*4882a593Smuzhiyun  */
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun static u32 knl_get_mc_route(int entry, u32 reg)
1251*4882a593Smuzhiyun {
1252*4882a593Smuzhiyun 	int mc, chan;
1253*4882a593Smuzhiyun 
1254*4882a593Smuzhiyun 	WARN_ON(entry >= KNL_MAX_CHANNELS);
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun 	mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
1257*4882a593Smuzhiyun 	chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun 	return knl_channel_remap(mc, chan);
1260*4882a593Smuzhiyun }
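/*
 * Illustrative example: for entry 1 the mc field sits in bits 5:3 and
 * the channel field in bits 21:20, so reg = 0x00100008 decodes to
 * mc = 1, chan = 1 before being collapsed to a single logical channel
 * by knl_channel_remap() (defined earlier in this file).
 */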
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun /*
1263*4882a593Smuzhiyun  * Render the EDC_ROUTE register in human-readable form.
1264*4882a593Smuzhiyun  * Output string s should be at least KNL_MAX_EDCS*2 bytes.
1265*4882a593Smuzhiyun  */
1266*4882a593Smuzhiyun static void knl_show_edc_route(u32 reg, char *s)
1267*4882a593Smuzhiyun {
1268*4882a593Smuzhiyun 	int i;
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	for (i = 0; i < KNL_MAX_EDCS; i++) {
1271*4882a593Smuzhiyun 		s[i*2] = knl_get_edc_route(i, reg) + '0';
1272*4882a593Smuzhiyun 		s[i*2+1] = '-';
1273*4882a593Smuzhiyun 	}
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun 	s[KNL_MAX_EDCS*2 - 1] = '\0';
1276*4882a593Smuzhiyun }
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun /*
1279*4882a593Smuzhiyun  * Render the MC_ROUTE register in human-readable form.
1280*4882a593Smuzhiyun  * Output string s should be at least KNL_MAX_CHANNELS*2 bytes.
1281*4882a593Smuzhiyun  */
1282*4882a593Smuzhiyun static void knl_show_mc_route(u32 reg, char *s)
1283*4882a593Smuzhiyun {
1284*4882a593Smuzhiyun 	int i;
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
1287*4882a593Smuzhiyun 		s[i*2] = knl_get_mc_route(i, reg) + '0';
1288*4882a593Smuzhiyun 		s[i*2+1] = '-';
1289*4882a593Smuzhiyun 	}
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	s[KNL_MAX_CHANNELS*2 - 1] = '\0';
1292*4882a593Smuzhiyun }
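/*
 * Illustrative output: with six channel entries the rendered string
 * looks like "0-1-2-0-1-2", eleven characters plus the terminating
 * NUL that overwrites the final dash, which is why the buffer must
 * be at least KNL_MAX_CHANNELS*2 (12) bytes.
 */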
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun #define KNL_EDC_ROUTE 0xb8
1295*4882a593Smuzhiyun #define KNL_MC_ROUTE 0xb4
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun /* Is this dram rule backed by regular DRAM in flat mode? */
1298*4882a593Smuzhiyun #define KNL_EDRAM(reg) GET_BITFIELD(reg, 29, 29)
1299*4882a593Smuzhiyun 
1300*4882a593Smuzhiyun /* Is this rule backed by DRAM, cacheable in EDRAM? */
1301*4882a593Smuzhiyun #define KNL_CACHEABLE(reg) GET_BITFIELD(reg, 28, 28)
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun /* Is this rule backed by EDC? */
1304*4882a593Smuzhiyun #define KNL_EDRAM_ONLY(reg) GET_BITFIELD(reg, 29, 29)
1308*4882a593Smuzhiyun 
1309*4882a593Smuzhiyun /* Is this rule mod3? */
1310*4882a593Smuzhiyun #define KNL_MOD3(reg) GET_BITFIELD(reg, 27, 27)
1311*4882a593Smuzhiyun 
1312*4882a593Smuzhiyun /*
1313*4882a593Smuzhiyun  * Figure out how big our RAM modules are.
1314*4882a593Smuzhiyun  *
1315*4882a593Smuzhiyun  * The DIMMMTR register in KNL doesn't tell us the size of the DIMMs, so we
1316*4882a593Smuzhiyun  * have to figure this out from the SAD rules, interleave lists, route tables,
1317*4882a593Smuzhiyun  * and TAD rules.
1318*4882a593Smuzhiyun  *
1319*4882a593Smuzhiyun  * SAD rules can have holes in them (e.g. the 3G-4G hole), so we have to
1320*4882a593Smuzhiyun  * inspect the TAD rules to figure out how large the SAD regions really are.
1321*4882a593Smuzhiyun  *
1322*4882a593Smuzhiyun  * When we know the real size of a SAD region and how many ways it's
1323*4882a593Smuzhiyun  * interleaved, we know the individual contribution of each channel to
1324*4882a593Smuzhiyun  * TAD is size/ways.
1325*4882a593Smuzhiyun  *
1326*4882a593Smuzhiyun  * Finally, we have to check whether each channel participates in each SAD
1327*4882a593Smuzhiyun  * region.
1328*4882a593Smuzhiyun  *
1329*4882a593Smuzhiyun  * Fortunately, KNL only supports one DIMM per channel, so once we know how
1330*4882a593Smuzhiyun  * much memory the channel uses, we know the DIMM is at least that large.
1331*4882a593Smuzhiyun  * (The BIOS might possibly choose not to map all available memory, in which
1332*4882a593Smuzhiyun  * case we will underreport the size of the DIMM.)
1333*4882a593Smuzhiyun  *
1334*4882a593Smuzhiyun  * In theory, we could try to determine the EDC sizes as well, but that would
1335*4882a593Smuzhiyun  * only work in flat mode, not in cache mode.
1336*4882a593Smuzhiyun  *
1337*4882a593Smuzhiyun  * @mc_sizes: Output sizes of channels (must have space for KNL_MAX_CHANNELS
1338*4882a593Smuzhiyun  *            elements)
1339*4882a593Smuzhiyun  */
1340*4882a593Smuzhiyun static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
1341*4882a593Smuzhiyun {
1342*4882a593Smuzhiyun 	u64 sad_base, sad_limit = 0;
1343*4882a593Smuzhiyun 	u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
1344*4882a593Smuzhiyun 	int sad_rule = 0;
1345*4882a593Smuzhiyun 	int tad_rule = 0;
1346*4882a593Smuzhiyun 	int intrlv_ways, tad_ways;
1347*4882a593Smuzhiyun 	u32 first_pkg, pkg;
1348*4882a593Smuzhiyun 	int i;
1349*4882a593Smuzhiyun 	u64 sad_actual_size[2]; /* sad size accounting for holes, per mc */
1350*4882a593Smuzhiyun 	u32 dram_rule, interleave_reg;
1351*4882a593Smuzhiyun 	u32 mc_route_reg[KNL_MAX_CHAS];
1352*4882a593Smuzhiyun 	u32 edc_route_reg[KNL_MAX_CHAS];
1353*4882a593Smuzhiyun 	int edram_only;
1354*4882a593Smuzhiyun 	char edc_route_string[KNL_MAX_EDCS*2];
1355*4882a593Smuzhiyun 	char mc_route_string[KNL_MAX_CHANNELS*2];
1356*4882a593Smuzhiyun 	int cur_reg_start;
1357*4882a593Smuzhiyun 	int mc;
1358*4882a593Smuzhiyun 	int channel;
1359*4882a593Smuzhiyun 	int participants[KNL_MAX_CHANNELS];
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun 	for (i = 0; i < KNL_MAX_CHANNELS; i++)
1362*4882a593Smuzhiyun 		mc_sizes[i] = 0;
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	/* Read the EDC route table in each CHA. */
1365*4882a593Smuzhiyun 	cur_reg_start = 0;
1366*4882a593Smuzhiyun 	for (i = 0; i < KNL_MAX_CHAS; i++) {
1367*4882a593Smuzhiyun 		pci_read_config_dword(pvt->knl.pci_cha[i],
1368*4882a593Smuzhiyun 				KNL_EDC_ROUTE, &edc_route_reg[i]);
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun 		if (i > 0 && edc_route_reg[i] != edc_route_reg[i-1]) {
1371*4882a593Smuzhiyun 			knl_show_edc_route(edc_route_reg[i-1],
1372*4882a593Smuzhiyun 					edc_route_string);
1373*4882a593Smuzhiyun 			if (cur_reg_start == i-1)
1374*4882a593Smuzhiyun 				edac_dbg(0, "edc route table for CHA %d: %s\n",
1375*4882a593Smuzhiyun 					cur_reg_start, edc_route_string);
1376*4882a593Smuzhiyun 			else
1377*4882a593Smuzhiyun 				edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1378*4882a593Smuzhiyun 					cur_reg_start, i-1, edc_route_string);
1379*4882a593Smuzhiyun 			cur_reg_start = i;
1380*4882a593Smuzhiyun 		}
1381*4882a593Smuzhiyun 	}
1382*4882a593Smuzhiyun 	knl_show_edc_route(edc_route_reg[i-1], edc_route_string);
1383*4882a593Smuzhiyun 	if (cur_reg_start == i-1)
1384*4882a593Smuzhiyun 		edac_dbg(0, "edc route table for CHA %d: %s\n",
1385*4882a593Smuzhiyun 			cur_reg_start, edc_route_string);
1386*4882a593Smuzhiyun 	else
1387*4882a593Smuzhiyun 		edac_dbg(0, "edc route table for CHA %d-%d: %s\n",
1388*4882a593Smuzhiyun 			cur_reg_start, i-1, edc_route_string);
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun 	/* Read the MC route table in each CHA. */
1391*4882a593Smuzhiyun 	cur_reg_start = 0;
1392*4882a593Smuzhiyun 	for (i = 0; i < KNL_MAX_CHAS; i++) {
1393*4882a593Smuzhiyun 		pci_read_config_dword(pvt->knl.pci_cha[i],
1394*4882a593Smuzhiyun 			KNL_MC_ROUTE, &mc_route_reg[i]);
1395*4882a593Smuzhiyun 
1396*4882a593Smuzhiyun 		if (i > 0 && mc_route_reg[i] != mc_route_reg[i-1]) {
1397*4882a593Smuzhiyun 			knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1398*4882a593Smuzhiyun 			if (cur_reg_start == i-1)
1399*4882a593Smuzhiyun 				edac_dbg(0, "mc route table for CHA %d: %s\n",
1400*4882a593Smuzhiyun 					cur_reg_start, mc_route_string);
1401*4882a593Smuzhiyun 			else
1402*4882a593Smuzhiyun 				edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1403*4882a593Smuzhiyun 					cur_reg_start, i-1, mc_route_string);
1404*4882a593Smuzhiyun 			cur_reg_start = i;
1405*4882a593Smuzhiyun 		}
1406*4882a593Smuzhiyun 	}
1407*4882a593Smuzhiyun 	knl_show_mc_route(mc_route_reg[i-1], mc_route_string);
1408*4882a593Smuzhiyun 	if (cur_reg_start == i-1)
1409*4882a593Smuzhiyun 		edac_dbg(0, "mc route table for CHA %d: %s\n",
1410*4882a593Smuzhiyun 			cur_reg_start, mc_route_string);
1411*4882a593Smuzhiyun 	else
1412*4882a593Smuzhiyun 		edac_dbg(0, "mc route table for CHA %d-%d: %s\n",
1413*4882a593Smuzhiyun 			cur_reg_start, i-1, mc_route_string);
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun 	/* Process DRAM rules */
1416*4882a593Smuzhiyun 	for (sad_rule = 0; sad_rule < pvt->info.max_sad; sad_rule++) {
1417*4882a593Smuzhiyun 		/* previous limit becomes the new base */
1418*4882a593Smuzhiyun 		sad_base = sad_limit;
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 		pci_read_config_dword(pvt->pci_sad0,
1421*4882a593Smuzhiyun 			pvt->info.dram_rule[sad_rule], &dram_rule);
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 		if (!DRAM_RULE_ENABLE(dram_rule))
1424*4882a593Smuzhiyun 			break;
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 		edram_only = KNL_EDRAM_ONLY(dram_rule);
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 		sad_limit = pvt->info.sad_limit(dram_rule)+1;
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun 		pci_read_config_dword(pvt->pci_sad0,
1431*4882a593Smuzhiyun 			pvt->info.interleave_list[sad_rule], &interleave_reg);
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 		/*
1434*4882a593Smuzhiyun 		 * Find out how many ways this dram rule is interleaved.
1435*4882a593Smuzhiyun 		 * We stop when we see the first channel again.
1436*4882a593Smuzhiyun 		 */
1437*4882a593Smuzhiyun 		first_pkg = sad_pkg(pvt->info.interleave_pkg,
1438*4882a593Smuzhiyun 						interleave_reg, 0);
1439*4882a593Smuzhiyun 		for (intrlv_ways = 1; intrlv_ways < 8; intrlv_ways++) {
1440*4882a593Smuzhiyun 			pkg = sad_pkg(pvt->info.interleave_pkg,
1441*4882a593Smuzhiyun 						interleave_reg, intrlv_ways);
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 			if ((pkg & 0x8) == 0) {
1444*4882a593Smuzhiyun 				/*
1445*4882a593Smuzhiyun 				 * 0 bit means memory is non-local,
1446*4882a593Smuzhiyun 				 * which KNL doesn't support
1447*4882a593Smuzhiyun 				 */
1448*4882a593Smuzhiyun 				edac_dbg(0, "Unexpected interleave target %d\n",
1449*4882a593Smuzhiyun 					pkg);
1450*4882a593Smuzhiyun 				return -1;
1451*4882a593Smuzhiyun 			}
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun 			if (pkg == first_pkg)
1454*4882a593Smuzhiyun 				break;
1455*4882a593Smuzhiyun 		}
1456*4882a593Smuzhiyun 		if (KNL_MOD3(dram_rule))
1457*4882a593Smuzhiyun 			intrlv_ways *= 3;
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 		edac_dbg(3, "dram rule %d (base 0x%llx, limit 0x%llx), %d way interleave%s\n",
1460*4882a593Smuzhiyun 			sad_rule,
1461*4882a593Smuzhiyun 			sad_base,
1462*4882a593Smuzhiyun 			sad_limit,
1463*4882a593Smuzhiyun 			intrlv_ways,
1464*4882a593Smuzhiyun 			edram_only ? ", EDRAM" : "");
1465*4882a593Smuzhiyun 
1466*4882a593Smuzhiyun 		/*
1467*4882a593Smuzhiyun 		 * Find out how big the SAD region really is by iterating
1468*4882a593Smuzhiyun 		 * over TAD tables (SAD regions may contain holes).
1469*4882a593Smuzhiyun 		 * Each memory controller might have a different TAD table, so
1470*4882a593Smuzhiyun 		 * we have to look at both.
1471*4882a593Smuzhiyun 		 *
1472*4882a593Smuzhiyun 		 * Livespace is the memory that's mapped in this TAD table,
1473*4882a593Smuzhiyun 		 * deadspace is the holes (this could be the MMIO hole, or it
1474*4882a593Smuzhiyun 		 * could be memory that's mapped by the other TAD table but
1475*4882a593Smuzhiyun 		 * not this one).
1476*4882a593Smuzhiyun 		 */
1477*4882a593Smuzhiyun 		for (mc = 0; mc < 2; mc++) {
1478*4882a593Smuzhiyun 			sad_actual_size[mc] = 0;
1479*4882a593Smuzhiyun 			tad_livespace = 0;
1480*4882a593Smuzhiyun 			for (tad_rule = 0;
1481*4882a593Smuzhiyun 					tad_rule < ARRAY_SIZE(
1482*4882a593Smuzhiyun 						knl_tad_dram_limit_lo);
1483*4882a593Smuzhiyun 					tad_rule++) {
1484*4882a593Smuzhiyun 				if (knl_get_tad(pvt,
1485*4882a593Smuzhiyun 						tad_rule,
1486*4882a593Smuzhiyun 						mc,
1487*4882a593Smuzhiyun 						&tad_deadspace,
1488*4882a593Smuzhiyun 						&tad_limit,
1489*4882a593Smuzhiyun 						&tad_ways))
1490*4882a593Smuzhiyun 					break;
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 				tad_size = (tad_limit+1) -
1493*4882a593Smuzhiyun 					(tad_livespace + tad_deadspace);
1494*4882a593Smuzhiyun 				tad_livespace += tad_size;
1495*4882a593Smuzhiyun 				tad_base = (tad_limit+1) - tad_size;
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 				if (tad_base < sad_base) {
1498*4882a593Smuzhiyun 					if (tad_limit > sad_base)
1499*4882a593Smuzhiyun 						edac_dbg(0, "TAD region overlaps lower SAD boundary -- TAD tables may be configured incorrectly.\n");
1500*4882a593Smuzhiyun 				} else if (tad_base < sad_limit) {
1501*4882a593Smuzhiyun 					if (tad_limit+1 > sad_limit) {
1502*4882a593Smuzhiyun 						edac_dbg(0, "TAD region overlaps upper SAD boundary -- TAD tables may be configured incorrectly.\n");
1503*4882a593Smuzhiyun 					} else {
1504*4882a593Smuzhiyun 						/* TAD region is completely inside SAD region */
1505*4882a593Smuzhiyun 						edac_dbg(3, "TAD region %d 0x%llx - 0x%llx (%lld bytes) table%d\n",
1506*4882a593Smuzhiyun 							tad_rule, tad_base,
1507*4882a593Smuzhiyun 							tad_limit, tad_size,
1508*4882a593Smuzhiyun 							mc);
1509*4882a593Smuzhiyun 						sad_actual_size[mc] += tad_size;
1510*4882a593Smuzhiyun 					}
1511*4882a593Smuzhiyun 				}
1512*4882a593Smuzhiyun 			}
1513*4882a593Smuzhiyun 		}
1514*4882a593Smuzhiyun 
1515*4882a593Smuzhiyun 		for (mc = 0; mc < 2; mc++) {
1516*4882a593Smuzhiyun 			edac_dbg(3, " total TAD DRAM footprint in table%d : 0x%llx (%lld bytes)\n",
1517*4882a593Smuzhiyun 				mc, sad_actual_size[mc], sad_actual_size[mc]);
1518*4882a593Smuzhiyun 		}
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 		/* Ignore EDRAM rule */
1521*4882a593Smuzhiyun 		if (edram_only)
1522*4882a593Smuzhiyun 			continue;
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 		/* Figure out which channels participate in interleave. */
1525*4882a593Smuzhiyun 		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++)
1526*4882a593Smuzhiyun 			participants[channel] = 0;
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 		/* For each channel, does at least one CHA have
1529*4882a593Smuzhiyun 		 * this channel mapped to the given target?
1530*4882a593Smuzhiyun 		 */
1531*4882a593Smuzhiyun 		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1532*4882a593Smuzhiyun 			int target;
1533*4882a593Smuzhiyun 			int cha;
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 			for (target = 0; target < KNL_MAX_CHANNELS; target++) {
1536*4882a593Smuzhiyun 				for (cha = 0; cha < KNL_MAX_CHAS; cha++) {
1537*4882a593Smuzhiyun 					if (knl_get_mc_route(target,
1538*4882a593Smuzhiyun 						mc_route_reg[cha]) == channel
1539*4882a593Smuzhiyun 						&& !participants[channel]) {
1540*4882a593Smuzhiyun 						participants[channel] = 1;
1541*4882a593Smuzhiyun 						break;
1542*4882a593Smuzhiyun 					}
1543*4882a593Smuzhiyun 				}
1544*4882a593Smuzhiyun 			}
1545*4882a593Smuzhiyun 		}
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 		for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) {
1548*4882a593Smuzhiyun 			mc = knl_channel_mc(channel);
1549*4882a593Smuzhiyun 			if (participants[channel]) {
1550*4882a593Smuzhiyun 				edac_dbg(4, "mc channel %d contributes %lld bytes via sad entry %d\n",
1551*4882a593Smuzhiyun 					channel,
1552*4882a593Smuzhiyun 					sad_actual_size[mc]/intrlv_ways,
1553*4882a593Smuzhiyun 					sad_rule);
1554*4882a593Smuzhiyun 				mc_sizes[channel] +=
1555*4882a593Smuzhiyun 					sad_actual_size[mc]/intrlv_ways;
1556*4882a593Smuzhiyun 			}
1557*4882a593Smuzhiyun 		}
1558*4882a593Smuzhiyun 	}
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 	return 0;
1561*4882a593Smuzhiyun }
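/*
 * Illustrative arithmetic for the sizing logic above: if a SAD rule
 * maps 32 GiB of actual DRAM in one MC's TAD table (sad_actual_size)
 * and is interleaved 2 ways, each participating channel behind that
 * MC is credited with 32 GiB / 2 = 16 GiB for the rule; summing over
 * all non-EDRAM SAD rules gives the per-channel (per-DIMM) size.
 */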
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun static void get_source_id(struct mem_ctl_info *mci)
1564*4882a593Smuzhiyun {
1565*4882a593Smuzhiyun 	struct sbridge_pvt *pvt = mci->pvt_info;
1566*4882a593Smuzhiyun 	u32 reg;
1567*4882a593Smuzhiyun 
1568*4882a593Smuzhiyun 	if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL ||
1569*4882a593Smuzhiyun 	    pvt->info.type == KNIGHTS_LANDING)
1570*4882a593Smuzhiyun 		pci_read_config_dword(pvt->pci_sad1, SAD_TARGET, &reg);
1571*4882a593Smuzhiyun 	else
1572*4882a593Smuzhiyun 		pci_read_config_dword(pvt->pci_br0, SAD_TARGET, &reg);
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 	if (pvt->info.type == KNIGHTS_LANDING)
1575*4882a593Smuzhiyun 		pvt->sbridge_dev->source_id = SOURCE_ID_KNL(reg);
1576*4882a593Smuzhiyun 	else
1577*4882a593Smuzhiyun 		pvt->sbridge_dev->source_id = SOURCE_ID(reg);
1578*4882a593Smuzhiyun }
1579*4882a593Smuzhiyun 
1580*4882a593Smuzhiyun static int __populate_dimms(struct mem_ctl_info *mci,
1581*4882a593Smuzhiyun 			    u64 knl_mc_sizes[KNL_MAX_CHANNELS],
1582*4882a593Smuzhiyun 			    enum edac_type mode)
1583*4882a593Smuzhiyun {
1584*4882a593Smuzhiyun 	struct sbridge_pvt *pvt = mci->pvt_info;
1585*4882a593Smuzhiyun 	int channels = pvt->info.type == KNIGHTS_LANDING ? KNL_MAX_CHANNELS
1586*4882a593Smuzhiyun 							 : NUM_CHANNELS;
1587*4882a593Smuzhiyun 	unsigned int i, j, banks, ranks, rows, cols, npages;
1588*4882a593Smuzhiyun 	struct dimm_info *dimm;
1589*4882a593Smuzhiyun 	enum mem_type mtype;
1590*4882a593Smuzhiyun 	u64 size;
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun 	mtype = pvt->info.get_memory_type(pvt);
1593*4882a593Smuzhiyun 	if (mtype == MEM_RDDR3 || mtype == MEM_RDDR4)
1594*4882a593Smuzhiyun 		edac_dbg(0, "Memory is registered\n");
1595*4882a593Smuzhiyun 	else if (mtype == MEM_UNKNOWN)
1596*4882a593Smuzhiyun 		edac_dbg(0, "Cannot determine memory type\n");
1597*4882a593Smuzhiyun 	else
1598*4882a593Smuzhiyun 		edac_dbg(0, "Memory is unregistered\n");
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 	if (mtype == MEM_DDR4 || mtype == MEM_RDDR4)
1601*4882a593Smuzhiyun 		banks = 16;
1602*4882a593Smuzhiyun 	else
1603*4882a593Smuzhiyun 		banks = 8;
1604*4882a593Smuzhiyun 
1605*4882a593Smuzhiyun 	for (i = 0; i < channels; i++) {
1606*4882a593Smuzhiyun 		u32 mtr;
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun 		int max_dimms_per_channel;
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun 		if (pvt->info.type == KNIGHTS_LANDING) {
1611*4882a593Smuzhiyun 			max_dimms_per_channel = 1;
1612*4882a593Smuzhiyun 			if (!pvt->knl.pci_channel[i])
1613*4882a593Smuzhiyun 				continue;
1614*4882a593Smuzhiyun 		} else {
1615*4882a593Smuzhiyun 			max_dimms_per_channel = ARRAY_SIZE(mtr_regs);
1616*4882a593Smuzhiyun 			if (!pvt->pci_tad[i])
1617*4882a593Smuzhiyun 				continue;
1618*4882a593Smuzhiyun 		}
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 		for (j = 0; j < max_dimms_per_channel; j++) {
1621*4882a593Smuzhiyun 			dimm = edac_get_dimm(mci, i, j, 0);
1622*4882a593Smuzhiyun 			if (pvt->info.type == KNIGHTS_LANDING) {
1623*4882a593Smuzhiyun 				pci_read_config_dword(pvt->knl.pci_channel[i],
1624*4882a593Smuzhiyun 					knl_mtr_reg, &mtr);
1625*4882a593Smuzhiyun 			} else {
1626*4882a593Smuzhiyun 				pci_read_config_dword(pvt->pci_tad[i],
1627*4882a593Smuzhiyun 					mtr_regs[j], &mtr);
1628*4882a593Smuzhiyun 			}
1629*4882a593Smuzhiyun 			edac_dbg(4, "Channel #%d  MTR%d = %x\n", i, j, mtr);
1630*4882a593Smuzhiyun 			if (IS_DIMM_PRESENT(mtr)) {
1631*4882a593Smuzhiyun 				if (!IS_ECC_ENABLED(pvt->info.mcmtr)) {
1632*4882a593Smuzhiyun 					sbridge_printk(KERN_ERR, "CPU SrcID #%d, Ha #%d, Channel #%d has DIMMs, but ECC is disabled\n",
1633*4882a593Smuzhiyun 						       pvt->sbridge_dev->source_id,
1634*4882a593Smuzhiyun 						       pvt->sbridge_dev->dom, i);
1635*4882a593Smuzhiyun 					return -ENODEV;
1636*4882a593Smuzhiyun 				}
1637*4882a593Smuzhiyun 				pvt->channel[i].dimms++;
1638*4882a593Smuzhiyun 
1639*4882a593Smuzhiyun 				ranks = numrank(pvt->info.type, mtr);
1640*4882a593Smuzhiyun 
1641*4882a593Smuzhiyun 				if (pvt->info.type == KNIGHTS_LANDING) {
1642*4882a593Smuzhiyun 					/* For DDR4, this is fixed. */
1643*4882a593Smuzhiyun 					cols = 1 << 10;
1644*4882a593Smuzhiyun 					rows = knl_mc_sizes[i] /
1645*4882a593Smuzhiyun 						((u64) cols * ranks * banks * 8);
1646*4882a593Smuzhiyun 				} else {
1647*4882a593Smuzhiyun 					rows = numrow(mtr);
1648*4882a593Smuzhiyun 					cols = numcol(mtr);
1649*4882a593Smuzhiyun 				}
1650*4882a593Smuzhiyun 
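				/*
				 * rows * cols * banks * ranks counts 64-bit
				 * words (8 bytes each), so MiB = words >> 17,
				 * written below as >> (20 - 3); the KNL
				 * branch above inverts the same relation to
				 * derive rows from the channel byte size.
				 */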
1651*4882a593Smuzhiyun 				size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
1652*4882a593Smuzhiyun 				npages = MiB_TO_PAGES(size);
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun 				edac_dbg(0, "mc#%d: ha %d channel %d, dimm %d, %lld MiB (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
1655*4882a593Smuzhiyun 					 pvt->sbridge_dev->mc, pvt->sbridge_dev->dom, i, j,
1656*4882a593Smuzhiyun 					 size, npages,
1657*4882a593Smuzhiyun 					 banks, ranks, rows, cols);
1658*4882a593Smuzhiyun 
1659*4882a593Smuzhiyun 				dimm->nr_pages = npages;
1660*4882a593Smuzhiyun 				dimm->grain = 32;
1661*4882a593Smuzhiyun 				dimm->dtype = pvt->info.get_width(pvt, mtr);
1662*4882a593Smuzhiyun 				dimm->mtype = mtype;
1663*4882a593Smuzhiyun 				dimm->edac_mode = mode;
1664*4882a593Smuzhiyun 				snprintf(dimm->label, sizeof(dimm->label),
1665*4882a593Smuzhiyun 						 "CPU_SrcID#%u_Ha#%u_Chan#%u_DIMM#%u",
1666*4882a593Smuzhiyun 						 pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom, i, j);
1667*4882a593Smuzhiyun 			}
1668*4882a593Smuzhiyun 		}
1669*4882a593Smuzhiyun 	}
1670*4882a593Smuzhiyun 
1671*4882a593Smuzhiyun 	return 0;
1672*4882a593Smuzhiyun }
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun static int get_dimm_config(struct mem_ctl_info *mci)
1675*4882a593Smuzhiyun {
1676*4882a593Smuzhiyun 	struct sbridge_pvt *pvt = mci->pvt_info;
1677*4882a593Smuzhiyun 	u64 knl_mc_sizes[KNL_MAX_CHANNELS];
1678*4882a593Smuzhiyun 	enum edac_type mode;
1679*4882a593Smuzhiyun 	u32 reg;
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	pvt->sbridge_dev->node_id = pvt->info.get_node_id(pvt);
1682*4882a593Smuzhiyun 	edac_dbg(0, "mc#%d: Node ID: %d, source ID: %d\n",
1683*4882a593Smuzhiyun 		 pvt->sbridge_dev->mc,
1684*4882a593Smuzhiyun 		 pvt->sbridge_dev->node_id,
1685*4882a593Smuzhiyun 		 pvt->sbridge_dev->source_id);
1686*4882a593Smuzhiyun 
1687*4882a593Smuzhiyun 	/* KNL doesn't support mirroring or lockstep,
1688*4882a593Smuzhiyun 	 * and is always closed page
1689*4882a593Smuzhiyun 	 */
1690*4882a593Smuzhiyun 	if (pvt->info.type == KNIGHTS_LANDING) {
1691*4882a593Smuzhiyun 		mode = EDAC_S4ECD4ED;
1692*4882a593Smuzhiyun 		pvt->mirror_mode = NON_MIRRORING;
1693*4882a593Smuzhiyun 		pvt->is_cur_addr_mirrored = false;
1694*4882a593Smuzhiyun 
1695*4882a593Smuzhiyun 		if (knl_get_dimm_capacity(pvt, knl_mc_sizes) != 0)
1696*4882a593Smuzhiyun 			return -1;
1697*4882a593Smuzhiyun 		if (pci_read_config_dword(pvt->pci_ta, KNL_MCMTR, &pvt->info.mcmtr)) {
1698*4882a593Smuzhiyun 			edac_dbg(0, "Failed to read KNL_MCMTR register\n");
1699*4882a593Smuzhiyun 			return -ENODEV;
1700*4882a593Smuzhiyun 		}
1701*4882a593Smuzhiyun 	} else {
1702*4882a593Smuzhiyun 		if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
1703*4882a593Smuzhiyun 			if (pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg)) {
1704*4882a593Smuzhiyun 				edac_dbg(0, "Failed to read HASWELL_HASYSDEFEATURE2 register\n");
1705*4882a593Smuzhiyun 				return -ENODEV;
1706*4882a593Smuzhiyun 			}
1707*4882a593Smuzhiyun 			pvt->is_chan_hash = GET_BITFIELD(reg, 21, 21);
1708*4882a593Smuzhiyun 			if (GET_BITFIELD(reg, 28, 28)) {
1709*4882a593Smuzhiyun 				pvt->mirror_mode = ADDR_RANGE_MIRRORING;
1710*4882a593Smuzhiyun 				edac_dbg(0, "Address range partial memory mirroring is enabled\n");
1711*4882a593Smuzhiyun 				goto next;
1712*4882a593Smuzhiyun 			}
1713*4882a593Smuzhiyun 		}
1714*4882a593Smuzhiyun 		if (pci_read_config_dword(pvt->pci_ras, RASENABLES, &reg)) {
1715*4882a593Smuzhiyun 			edac_dbg(0, "Failed to read RASENABLES register\n");
1716*4882a593Smuzhiyun 			return -ENODEV;
1717*4882a593Smuzhiyun 		}
1718*4882a593Smuzhiyun 		if (IS_MIRROR_ENABLED(reg)) {
1719*4882a593Smuzhiyun 			pvt->mirror_mode = FULL_MIRRORING;
1720*4882a593Smuzhiyun 			edac_dbg(0, "Full memory mirroring is enabled\n");
1721*4882a593Smuzhiyun 		} else {
1722*4882a593Smuzhiyun 			pvt->mirror_mode = NON_MIRRORING;
1723*4882a593Smuzhiyun 			edac_dbg(0, "Memory mirroring is disabled\n");
1724*4882a593Smuzhiyun 		}
1725*4882a593Smuzhiyun 
1726*4882a593Smuzhiyun next:
1727*4882a593Smuzhiyun 		if (pci_read_config_dword(pvt->pci_ta, MCMTR, &pvt->info.mcmtr)) {
1728*4882a593Smuzhiyun 			edac_dbg(0, "Failed to read MCMTR register\n");
1729*4882a593Smuzhiyun 			return -ENODEV;
1730*4882a593Smuzhiyun 		}
1731*4882a593Smuzhiyun 		if (IS_LOCKSTEP_ENABLED(pvt->info.mcmtr)) {
1732*4882a593Smuzhiyun 			edac_dbg(0, "Lockstep is enabled\n");
1733*4882a593Smuzhiyun 			mode = EDAC_S8ECD8ED;
1734*4882a593Smuzhiyun 			pvt->is_lockstep = true;
1735*4882a593Smuzhiyun 		} else {
1736*4882a593Smuzhiyun 			edac_dbg(0, "Lockstep is disabled\n");
1737*4882a593Smuzhiyun 			mode = EDAC_S4ECD4ED;
1738*4882a593Smuzhiyun 			pvt->is_lockstep = false;
1739*4882a593Smuzhiyun 		}
1740*4882a593Smuzhiyun 		if (IS_CLOSE_PG(pvt->info.mcmtr)) {
1741*4882a593Smuzhiyun 			edac_dbg(0, "address map is on closed page mode\n");
1742*4882a593Smuzhiyun 			pvt->is_close_pg = true;
1743*4882a593Smuzhiyun 		} else {
1744*4882a593Smuzhiyun 			edac_dbg(0, "address map is on open page mode\n");
1745*4882a593Smuzhiyun 			pvt->is_close_pg = false;
1746*4882a593Smuzhiyun 		}
1747*4882a593Smuzhiyun 	}
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun 	return __populate_dimms(mci, knl_mc_sizes, mode);
1750*4882a593Smuzhiyun }
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun static void get_memory_layout(const struct mem_ctl_info *mci)
1753*4882a593Smuzhiyun {
1754*4882a593Smuzhiyun 	struct sbridge_pvt *pvt = mci->pvt_info;
1755*4882a593Smuzhiyun 	int i, j, k, n_sads, n_tads, sad_interl;
1756*4882a593Smuzhiyun 	u32 reg;
1757*4882a593Smuzhiyun 	u64 limit, prv = 0;
1758*4882a593Smuzhiyun 	u64 tmp_mb;
1759*4882a593Smuzhiyun 	u32 gb, mb;
1760*4882a593Smuzhiyun 	u32 rir_way;
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 	/*
1763*4882a593Smuzhiyun 	 * Step 1) Get TOLM/TOHM ranges
1764*4882a593Smuzhiyun 	 */
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun 	pvt->tolm = pvt->info.get_tolm(pvt);
1767*4882a593Smuzhiyun 	tmp_mb = (1 + pvt->tolm) >> 20;
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun 	gb = div_u64_rem(tmp_mb, 1024, &mb);
1770*4882a593Smuzhiyun 	edac_dbg(0, "TOLM: %u.%03u GB (0x%016Lx)\n",
1771*4882a593Smuzhiyun 		gb, (mb*1000)/1024, (u64)pvt->tolm);
1772*4882a593Smuzhiyun 
1773*4882a593Smuzhiyun 	/* Address range is already 45:25 */
1774*4882a593Smuzhiyun 	pvt->tohm = pvt->info.get_tohm(pvt);
1775*4882a593Smuzhiyun 	tmp_mb = (1 + pvt->tohm) >> 20;
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun 	gb = div_u64_rem(tmp_mb, 1024, &mb);
1778*4882a593Smuzhiyun 	edac_dbg(0, "TOHM: %u.%03u GB (0x%016Lx)\n",
1779*4882a593Smuzhiyun 		gb, (mb*1000)/1024, (u64)pvt->tohm);
1780*4882a593Smuzhiyun 
1781*4882a593Smuzhiyun 	/*
1782*4882a593Smuzhiyun 	 * Step 2) Get SAD range and SAD Interleave list
1783*4882a593Smuzhiyun 	 * TAD registers contain the interleave wayness. However, it
1784*4882a593Smuzhiyun 	 * seems simpler to just discover it indirectly, with the
1785*4882a593Smuzhiyun 	 * algorithm below.
1786*4882a593Smuzhiyun 	 */
1787*4882a593Smuzhiyun 	prv = 0;
1788*4882a593Smuzhiyun 	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1789*4882a593Smuzhiyun 		/* SAD_LIMIT Address range is 45:26 */
1790*4882a593Smuzhiyun 		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1791*4882a593Smuzhiyun 				      &reg);
1792*4882a593Smuzhiyun 		limit = pvt->info.sad_limit(reg);
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun 		if (!DRAM_RULE_ENABLE(reg))
1795*4882a593Smuzhiyun 			continue;
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 		if (limit <= prv)
1798*4882a593Smuzhiyun 			break;
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun 		tmp_mb = (limit + 1) >> 20;
1801*4882a593Smuzhiyun 		gb = div_u64_rem(tmp_mb, 1024, &mb);
1802*4882a593Smuzhiyun 		edac_dbg(0, "SAD#%d %s up to %u.%03u GB (0x%016Lx) Interleave: %s reg=0x%08x\n",
1803*4882a593Smuzhiyun 			 n_sads,
1804*4882a593Smuzhiyun 			 show_dram_attr(pvt->info.dram_attr(reg)),
1805*4882a593Smuzhiyun 			 gb, (mb*1000)/1024,
1806*4882a593Smuzhiyun 			 ((u64)tmp_mb) << 20L,
1807*4882a593Smuzhiyun 			 get_intlv_mode_str(reg, pvt->info.type),
1808*4882a593Smuzhiyun 			 reg);
1809*4882a593Smuzhiyun 		prv = limit;
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun 		pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1812*4882a593Smuzhiyun 				      &reg);
1813*4882a593Smuzhiyun 		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1814*4882a593Smuzhiyun 		for (j = 0; j < 8; j++) {
1815*4882a593Smuzhiyun 			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, j);
1816*4882a593Smuzhiyun 			if (j > 0 && sad_interl == pkg)
1817*4882a593Smuzhiyun 				break;
1818*4882a593Smuzhiyun 
1819*4882a593Smuzhiyun 			edac_dbg(0, "SAD#%d, interleave #%d: %d\n",
1820*4882a593Smuzhiyun 				 n_sads, j, pkg);
1821*4882a593Smuzhiyun 		}
1822*4882a593Smuzhiyun 	}
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun 	if (pvt->info.type == KNIGHTS_LANDING)
1825*4882a593Smuzhiyun 		return;
1826*4882a593Smuzhiyun 
1827*4882a593Smuzhiyun 	/*
1828*4882a593Smuzhiyun 	 * Step 3) Get TAD range
1829*4882a593Smuzhiyun 	 */
1830*4882a593Smuzhiyun 	prv = 0;
1831*4882a593Smuzhiyun 	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
1832*4882a593Smuzhiyun 		pci_read_config_dword(pvt->pci_ha, tad_dram_rule[n_tads], &reg);
1833*4882a593Smuzhiyun 		limit = TAD_LIMIT(reg);
1834*4882a593Smuzhiyun 		if (limit <= prv)
1835*4882a593Smuzhiyun 			break;
1836*4882a593Smuzhiyun 		tmp_mb = (limit + 1) >> 20;
1837*4882a593Smuzhiyun 
1838*4882a593Smuzhiyun 		gb = div_u64_rem(tmp_mb, 1024, &mb);
1839*4882a593Smuzhiyun 		edac_dbg(0, "TAD#%d: up to %u.%03u GB (0x%016Lx), socket interleave %d, memory interleave %d, TGT: %d, %d, %d, %d, reg=0x%08x\n",
1840*4882a593Smuzhiyun 			 n_tads, gb, (mb*1000)/1024,
1841*4882a593Smuzhiyun 			 ((u64)tmp_mb) << 20L,
1842*4882a593Smuzhiyun 			 (u32)(1 << TAD_SOCK(reg)),
1843*4882a593Smuzhiyun 			 (u32)TAD_CH(reg) + 1,
1844*4882a593Smuzhiyun 			 (u32)TAD_TGT0(reg),
1845*4882a593Smuzhiyun 			 (u32)TAD_TGT1(reg),
1846*4882a593Smuzhiyun 			 (u32)TAD_TGT2(reg),
1847*4882a593Smuzhiyun 			 (u32)TAD_TGT3(reg),
1848*4882a593Smuzhiyun 			 reg);
1849*4882a593Smuzhiyun 		prv = limit;
1850*4882a593Smuzhiyun 	}
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun 	/*
1853*4882a593Smuzhiyun 	 * Step 4) Get TAD offsets for each channel
1854*4882a593Smuzhiyun 	 */
1855*4882a593Smuzhiyun 	for (i = 0; i < NUM_CHANNELS; i++) {
1856*4882a593Smuzhiyun 		if (!pvt->channel[i].dimms)
1857*4882a593Smuzhiyun 			continue;
1858*4882a593Smuzhiyun 		for (j = 0; j < n_tads; j++) {
1859*4882a593Smuzhiyun 			pci_read_config_dword(pvt->pci_tad[i],
1860*4882a593Smuzhiyun 					      tad_ch_nilv_offset[j],
1861*4882a593Smuzhiyun 					      &reg);
1862*4882a593Smuzhiyun 			tmp_mb = TAD_OFFSET(reg) >> 20;
1863*4882a593Smuzhiyun 			gb = div_u64_rem(tmp_mb, 1024, &mb);
1864*4882a593Smuzhiyun 			edac_dbg(0, "TAD CH#%d, offset #%d: %u.%03u GB (0x%016Lx), reg=0x%08x\n",
1865*4882a593Smuzhiyun 				 i, j,
1866*4882a593Smuzhiyun 				 gb, (mb*1000)/1024,
1867*4882a593Smuzhiyun 				 ((u64)tmp_mb) << 20L,
1868*4882a593Smuzhiyun 				 reg);
1869*4882a593Smuzhiyun 		}
1870*4882a593Smuzhiyun 	}
1871*4882a593Smuzhiyun 
1872*4882a593Smuzhiyun 	/*
1873*4882a593Smuzhiyun 	 * Step 5) Get RIR Wayness/Limit for each channel
1874*4882a593Smuzhiyun 	 */
1875*4882a593Smuzhiyun 	for (i = 0; i < NUM_CHANNELS; i++) {
1876*4882a593Smuzhiyun 		if (!pvt->channel[i].dimms)
1877*4882a593Smuzhiyun 			continue;
1878*4882a593Smuzhiyun 		for (j = 0; j < MAX_RIR_RANGES; j++) {
1879*4882a593Smuzhiyun 			pci_read_config_dword(pvt->pci_tad[i],
1880*4882a593Smuzhiyun 					      rir_way_limit[j],
1881*4882a593Smuzhiyun 					      &reg);
1882*4882a593Smuzhiyun 
1883*4882a593Smuzhiyun 			if (!IS_RIR_VALID(reg))
1884*4882a593Smuzhiyun 				continue;
1885*4882a593Smuzhiyun 
1886*4882a593Smuzhiyun 			tmp_mb = pvt->info.rir_limit(reg) >> 20;
1887*4882a593Smuzhiyun 			rir_way = 1 << RIR_WAY(reg);
1888*4882a593Smuzhiyun 			gb = div_u64_rem(tmp_mb, 1024, &mb);
1889*4882a593Smuzhiyun 			edac_dbg(0, "CH#%d RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d, reg=0x%08x\n",
1890*4882a593Smuzhiyun 				 i, j,
1891*4882a593Smuzhiyun 				 gb, (mb*1000)/1024,
1892*4882a593Smuzhiyun 				 ((u64)tmp_mb) << 20L,
1893*4882a593Smuzhiyun 				 rir_way,
1894*4882a593Smuzhiyun 				 reg);
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun 			for (k = 0; k < rir_way; k++) {
1897*4882a593Smuzhiyun 				pci_read_config_dword(pvt->pci_tad[i],
1898*4882a593Smuzhiyun 						      rir_offset[j][k],
1899*4882a593Smuzhiyun 						      &reg);
1900*4882a593Smuzhiyun 				tmp_mb = RIR_OFFSET(pvt->info.type, reg) << 6;
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun 				gb = div_u64_rem(tmp_mb, 1024, &mb);
1903*4882a593Smuzhiyun 				edac_dbg(0, "CH#%d RIR#%d INTL#%d, offset %u.%03u GB (0x%016Lx), tgt: %d, reg=0x%08x\n",
1904*4882a593Smuzhiyun 					 i, j, k,
1905*4882a593Smuzhiyun 					 gb, (mb*1000)/1024,
1906*4882a593Smuzhiyun 					 ((u64)tmp_mb) << 20L,
1907*4882a593Smuzhiyun 					 (u32)RIR_RNK_TGT(pvt->info.type, reg),
1908*4882a593Smuzhiyun 					 reg);
1909*4882a593Smuzhiyun 			}
1910*4882a593Smuzhiyun 		}
1911*4882a593Smuzhiyun 	}
1912*4882a593Smuzhiyun }
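/*
 * Note on the "%u.%03u GB" formatting used above (illustrative):
 * div_u64_rem(tmp_mb, 1024, &mb) splits a MiB count into whole GiB
 * plus a remainder, and (mb * 1000) / 1024 rescales the remainder to
 * thousandths, so tmp_mb = 1536 prints as "1.500 GB".
 */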
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun static struct mem_ctl_info *get_mci_for_node_id(u8 node_id, u8 ha)
1915*4882a593Smuzhiyun {
1916*4882a593Smuzhiyun 	struct sbridge_dev *sbridge_dev;
1917*4882a593Smuzhiyun 
1918*4882a593Smuzhiyun 	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
1919*4882a593Smuzhiyun 		if (sbridge_dev->node_id == node_id && sbridge_dev->dom == ha)
1920*4882a593Smuzhiyun 			return sbridge_dev->mci;
1921*4882a593Smuzhiyun 	}
1922*4882a593Smuzhiyun 	return NULL;
1923*4882a593Smuzhiyun }
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun static int get_memory_error_data(struct mem_ctl_info *mci,
1926*4882a593Smuzhiyun 				 u64 addr,
1927*4882a593Smuzhiyun 				 u8 *socket, u8 *ha,
1928*4882a593Smuzhiyun 				 long *channel_mask,
1929*4882a593Smuzhiyun 				 u8 *rank,
1930*4882a593Smuzhiyun 				 char **area_type, char *msg)
1931*4882a593Smuzhiyun {
1932*4882a593Smuzhiyun 	struct mem_ctl_info	*new_mci;
1933*4882a593Smuzhiyun 	struct sbridge_pvt *pvt = mci->pvt_info;
1934*4882a593Smuzhiyun 	struct pci_dev		*pci_ha;
1935*4882a593Smuzhiyun 	int			n_rir, n_sads, n_tads, sad_way, sck_xch;
1936*4882a593Smuzhiyun 	int			sad_interl, idx, base_ch;
1937*4882a593Smuzhiyun 	int			interleave_mode, shiftup = 0;
1938*4882a593Smuzhiyun 	unsigned int		sad_interleave[MAX_INTERLEAVE];
1939*4882a593Smuzhiyun 	u32			reg, dram_rule;
1940*4882a593Smuzhiyun 	u8			ch_way, sck_way, pkg, sad_ha = 0;
1941*4882a593Smuzhiyun 	u32			tad_offset;
1942*4882a593Smuzhiyun 	u32			rir_way;
1943*4882a593Smuzhiyun 	u32			mb, gb;
1944*4882a593Smuzhiyun 	u64			ch_addr, offset, limit = 0, prv = 0;
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun 
1947*4882a593Smuzhiyun 	/*
1948*4882a593Smuzhiyun 	 * Step 0) Check if the address is at special memory ranges
1949*4882a593Smuzhiyun 	 * The check below is probably enough to cover all cases where
1950*4882a593Smuzhiyun 	 * the error is not inside memory, except for the legacy
1951*4882a593Smuzhiyun 	 * range (e.g. VGA addresses). It is unlikely, however, that the
1952*4882a593Smuzhiyun 	 * memory controller would generate an error on that range.
1953*4882a593Smuzhiyun 	 */
1954*4882a593Smuzhiyun 	if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
1955*4882a593Smuzhiyun 		sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
1956*4882a593Smuzhiyun 		return -EINVAL;
1957*4882a593Smuzhiyun 	}
1958*4882a593Smuzhiyun 	if (addr >= (u64)pvt->tohm) {
1959*4882a593Smuzhiyun 		sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
1960*4882a593Smuzhiyun 		return -EINVAL;
1961*4882a593Smuzhiyun 	}
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 	/*
1964*4882a593Smuzhiyun 	 * Step 1) Get socket
1965*4882a593Smuzhiyun 	 */
1966*4882a593Smuzhiyun 	for (n_sads = 0; n_sads < pvt->info.max_sad; n_sads++) {
1967*4882a593Smuzhiyun 		pci_read_config_dword(pvt->pci_sad0, pvt->info.dram_rule[n_sads],
1968*4882a593Smuzhiyun 				      &reg);
1969*4882a593Smuzhiyun 
1970*4882a593Smuzhiyun 		if (!DRAM_RULE_ENABLE(reg))
1971*4882a593Smuzhiyun 			continue;
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun 		limit = pvt->info.sad_limit(reg);
1974*4882a593Smuzhiyun 		if (limit <= prv) {
1975*4882a593Smuzhiyun 			sprintf(msg, "Can't discover the memory socket");
1976*4882a593Smuzhiyun 			return -EINVAL;
1977*4882a593Smuzhiyun 		}
1978*4882a593Smuzhiyun 		if  (addr <= limit)
1979*4882a593Smuzhiyun 			break;
1980*4882a593Smuzhiyun 		prv = limit;
1981*4882a593Smuzhiyun 	}
1982*4882a593Smuzhiyun 	if (n_sads == pvt->info.max_sad) {
1983*4882a593Smuzhiyun 		sprintf(msg, "Can't discover the memory socket");
1984*4882a593Smuzhiyun 		return -EINVAL;
1985*4882a593Smuzhiyun 	}
1986*4882a593Smuzhiyun 	dram_rule = reg;
1987*4882a593Smuzhiyun 	*area_type = show_dram_attr(pvt->info.dram_attr(dram_rule));
1988*4882a593Smuzhiyun 	interleave_mode = pvt->info.interleave_mode(dram_rule);
1989*4882a593Smuzhiyun 
1990*4882a593Smuzhiyun 	pci_read_config_dword(pvt->pci_sad0, pvt->info.interleave_list[n_sads],
1991*4882a593Smuzhiyun 			      &reg);
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 	if (pvt->info.type == SANDY_BRIDGE) {
1994*4882a593Smuzhiyun 		sad_interl = sad_pkg(pvt->info.interleave_pkg, reg, 0);
1995*4882a593Smuzhiyun 		for (sad_way = 0; sad_way < 8; sad_way++) {
1996*4882a593Smuzhiyun 			u32 pkg = sad_pkg(pvt->info.interleave_pkg, reg, sad_way);
1997*4882a593Smuzhiyun 			if (sad_way > 0 && sad_interl == pkg)
1998*4882a593Smuzhiyun 				break;
1999*4882a593Smuzhiyun 			sad_interleave[sad_way] = pkg;
2000*4882a593Smuzhiyun 			edac_dbg(0, "SAD interleave #%d: %d\n",
2001*4882a593Smuzhiyun 				 sad_way, sad_interleave[sad_way]);
2002*4882a593Smuzhiyun 		}
2003*4882a593Smuzhiyun 		edac_dbg(0, "mc#%d: Error detected on SAD#%d: address 0x%016Lx < 0x%016Lx, Interleave [%d:6]%s\n",
2004*4882a593Smuzhiyun 			 pvt->sbridge_dev->mc,
2005*4882a593Smuzhiyun 			 n_sads,
2006*4882a593Smuzhiyun 			 addr,
2007*4882a593Smuzhiyun 			 limit,
2008*4882a593Smuzhiyun 			 sad_way + 7,
2009*4882a593Smuzhiyun 			 !interleave_mode ? "" : "XOR[18:16]");
2010*4882a593Smuzhiyun 		if (interleave_mode)
2011*4882a593Smuzhiyun 			idx = ((addr >> 6) ^ (addr >> 16)) & 7;
2012*4882a593Smuzhiyun 		else
2013*4882a593Smuzhiyun 			idx = (addr >> 6) & 7;
2014*4882a593Smuzhiyun 		switch (sad_way) {
2015*4882a593Smuzhiyun 		case 1:
2016*4882a593Smuzhiyun 			idx = 0;
2017*4882a593Smuzhiyun 			break;
2018*4882a593Smuzhiyun 		case 2:
2019*4882a593Smuzhiyun 			idx = idx & 1;
2020*4882a593Smuzhiyun 			break;
2021*4882a593Smuzhiyun 		case 4:
2022*4882a593Smuzhiyun 			idx = idx & 3;
2023*4882a593Smuzhiyun 			break;
2024*4882a593Smuzhiyun 		case 8:
2025*4882a593Smuzhiyun 			break;
2026*4882a593Smuzhiyun 		default:
2027*4882a593Smuzhiyun 			sprintf(msg, "Can't discover socket interleave");
2028*4882a593Smuzhiyun 			return -EINVAL;
2029*4882a593Smuzhiyun 		}
2030*4882a593Smuzhiyun 		*socket = sad_interleave[idx];
2031*4882a593Smuzhiyun 		edac_dbg(0, "SAD interleave index: %d (wayness %d) = CPU socket %d\n",
2032*4882a593Smuzhiyun 			 idx, sad_way, *socket);
2033*4882a593Smuzhiyun 	} else if (pvt->info.type == HASWELL || pvt->info.type == BROADWELL) {
2034*4882a593Smuzhiyun 		int bits, a7mode = A7MODE(dram_rule);
2035*4882a593Smuzhiyun 
2036*4882a593Smuzhiyun 		if (a7mode) {
2037*4882a593Smuzhiyun 			/* A7 mode swaps P9 with P6 */
2038*4882a593Smuzhiyun 			bits = GET_BITFIELD(addr, 7, 8) << 1;
2039*4882a593Smuzhiyun 			bits |= GET_BITFIELD(addr, 9, 9);
2040*4882a593Smuzhiyun 		} else
2041*4882a593Smuzhiyun 			bits = GET_BITFIELD(addr, 6, 8);
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 		if (interleave_mode == 0) {
2044*4882a593Smuzhiyun 			/* interleave mode will XOR {8,7,6} with {18,17,16} */
2045*4882a593Smuzhiyun 			idx = GET_BITFIELD(addr, 16, 18);
2046*4882a593Smuzhiyun 			idx ^= bits;
2047*4882a593Smuzhiyun 		} else
2048*4882a593Smuzhiyun 			idx = bits;
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun 		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2051*4882a593Smuzhiyun 		*socket = sad_pkg_socket(pkg);
2052*4882a593Smuzhiyun 		sad_ha = sad_pkg_ha(pkg);
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun 		if (a7mode) {
2055*4882a593Smuzhiyun 			/* MCChanShiftUpEnable */
2056*4882a593Smuzhiyun 			pci_read_config_dword(pvt->pci_ha, HASWELL_HASYSDEFEATURE2, &reg);
2057*4882a593Smuzhiyun 			shiftup = GET_BITFIELD(reg, 22, 22);
2058*4882a593Smuzhiyun 		}
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun 		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %i, shiftup: %i\n",
2061*4882a593Smuzhiyun 			 idx, *socket, sad_ha, shiftup);
2062*4882a593Smuzhiyun 	} else {
2063*4882a593Smuzhiyun 		/* Ivy Bridge's SAD mode doesn't support XOR interleave mode */
2064*4882a593Smuzhiyun 		idx = (addr >> 6) & 7;
2065*4882a593Smuzhiyun 		pkg = sad_pkg(pvt->info.interleave_pkg, reg, idx);
2066*4882a593Smuzhiyun 		*socket = sad_pkg_socket(pkg);
2067*4882a593Smuzhiyun 		sad_ha = sad_pkg_ha(pkg);
2068*4882a593Smuzhiyun 		edac_dbg(0, "SAD interleave package: %d = CPU socket %d, HA %d\n",
2069*4882a593Smuzhiyun 			 idx, *socket, sad_ha);
2070*4882a593Smuzhiyun 	}
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun 	*ha = sad_ha;
2073*4882a593Smuzhiyun 
2074*4882a593Smuzhiyun 	/*
2075*4882a593Smuzhiyun 	 * Move to the proper node structure, in order to access the
2076*4882a593Smuzhiyun 	 * right PCI registers
2077*4882a593Smuzhiyun 	 */
2078*4882a593Smuzhiyun 	new_mci = get_mci_for_node_id(*socket, sad_ha);
2079*4882a593Smuzhiyun 	if (!new_mci) {
2080*4882a593Smuzhiyun 		sprintf(msg, "Struct for socket #%u wasn't initialized",
2081*4882a593Smuzhiyun 			*socket);
2082*4882a593Smuzhiyun 		return -EINVAL;
2083*4882a593Smuzhiyun 	}
2084*4882a593Smuzhiyun 	mci = new_mci;
2085*4882a593Smuzhiyun 	pvt = mci->pvt_info;
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun 	/*
2088*4882a593Smuzhiyun 	 * Step 2) Get memory channel
2089*4882a593Smuzhiyun 	 */
2090*4882a593Smuzhiyun 	prv = 0;
2091*4882a593Smuzhiyun 	pci_ha = pvt->pci_ha;
2092*4882a593Smuzhiyun 	for (n_tads = 0; n_tads < MAX_TAD; n_tads++) {
2093*4882a593Smuzhiyun 		pci_read_config_dword(pci_ha, tad_dram_rule[n_tads], &reg);
2094*4882a593Smuzhiyun 		limit = TAD_LIMIT(reg);
2095*4882a593Smuzhiyun 		if (limit <= prv) {
2096*4882a593Smuzhiyun 			sprintf(msg, "Can't discover the memory channel");
2097*4882a593Smuzhiyun 			return -EINVAL;
2098*4882a593Smuzhiyun 		}
2099*4882a593Smuzhiyun 		if  (addr <= limit)
2100*4882a593Smuzhiyun 			break;
2101*4882a593Smuzhiyun 		prv = limit;
2102*4882a593Smuzhiyun 	}
2103*4882a593Smuzhiyun 	if (n_tads == MAX_TAD) {
2104*4882a593Smuzhiyun 		sprintf(msg, "Can't discover the memory channel");
2105*4882a593Smuzhiyun 		return -EINVAL;
2106*4882a593Smuzhiyun 	}
2107*4882a593Smuzhiyun 
2108*4882a593Smuzhiyun 	ch_way = TAD_CH(reg) + 1;
2109*4882a593Smuzhiyun 	sck_way = TAD_SOCK(reg);
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun 	if (ch_way == 3)
2112*4882a593Smuzhiyun 		idx = addr >> 6;
2113*4882a593Smuzhiyun 	else {
2114*4882a593Smuzhiyun 		idx = (addr >> (6 + sck_way + shiftup)) & 0x3;
2115*4882a593Smuzhiyun 		if (pvt->is_chan_hash)
2116*4882a593Smuzhiyun 			idx = haswell_chan_hash(idx, addr);
2117*4882a593Smuzhiyun 	}
2118*4882a593Smuzhiyun 	idx = idx % ch_way;
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 	/*
2121*4882a593Smuzhiyun 	 * FIXME: Shouldn't we use CHN_IDX_OFFSET() here, when ch_way == 3 ???
2122*4882a593Smuzhiyun 	 */
2123*4882a593Smuzhiyun 	switch (idx) {
2124*4882a593Smuzhiyun 	case 0:
2125*4882a593Smuzhiyun 		base_ch = TAD_TGT0(reg);
2126*4882a593Smuzhiyun 		break;
2127*4882a593Smuzhiyun 	case 1:
2128*4882a593Smuzhiyun 		base_ch = TAD_TGT1(reg);
2129*4882a593Smuzhiyun 		break;
2130*4882a593Smuzhiyun 	case 2:
2131*4882a593Smuzhiyun 		base_ch = TAD_TGT2(reg);
2132*4882a593Smuzhiyun 		break;
2133*4882a593Smuzhiyun 	case 3:
2134*4882a593Smuzhiyun 		base_ch = TAD_TGT3(reg);
2135*4882a593Smuzhiyun 		break;
2136*4882a593Smuzhiyun 	default:
2137*4882a593Smuzhiyun 		sprintf(msg, "Can't discover the TAD target");
2138*4882a593Smuzhiyun 		return -EINVAL;
2139*4882a593Smuzhiyun 	}
2140*4882a593Smuzhiyun 	*channel_mask = 1 << base_ch;
2141*4882a593Smuzhiyun 
2142*4882a593Smuzhiyun 	pci_read_config_dword(pvt->pci_tad[base_ch], tad_ch_nilv_offset[n_tads], &tad_offset);
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun 	if (pvt->mirror_mode == FULL_MIRRORING ||
2145*4882a593Smuzhiyun 	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && n_tads == 0)) {
2146*4882a593Smuzhiyun 		*channel_mask |= 1 << ((base_ch + 2) % 4);
2147*4882a593Smuzhiyun 		switch(ch_way) {
2148*4882a593Smuzhiyun 		case 2:
2149*4882a593Smuzhiyun 		case 4:
2150*4882a593Smuzhiyun 			sck_xch = (1 << sck_way) * (ch_way >> 1);
2151*4882a593Smuzhiyun 			break;
2152*4882a593Smuzhiyun 		default:
2153*4882a593Smuzhiyun 			sprintf(msg, "Invalid mirror set. Can't decode addr");
2154*4882a593Smuzhiyun 			return -EINVAL;
2155*4882a593Smuzhiyun 		}
2156*4882a593Smuzhiyun 
2157*4882a593Smuzhiyun 		pvt->is_cur_addr_mirrored = true;
2158*4882a593Smuzhiyun 	} else {
2159*4882a593Smuzhiyun 		sck_xch = (1 << sck_way) * ch_way;
2160*4882a593Smuzhiyun 		pvt->is_cur_addr_mirrored = false;
2161*4882a593Smuzhiyun 	}
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun 	if (pvt->is_lockstep)
2164*4882a593Smuzhiyun 		*channel_mask |= 1 << ((base_ch + 1) % 4);
2165*4882a593Smuzhiyun 
2166*4882a593Smuzhiyun 	offset = TAD_OFFSET(tad_offset);
2167*4882a593Smuzhiyun 
2168*4882a593Smuzhiyun 	edac_dbg(0, "TAD#%d: address 0x%016Lx < 0x%016Lx, socket interleave %d, channel interleave %d (offset 0x%08Lx), index %d, base ch: %d, ch mask: 0x%02lx\n",
2169*4882a593Smuzhiyun 		 n_tads,
2170*4882a593Smuzhiyun 		 addr,
2171*4882a593Smuzhiyun 		 limit,
2172*4882a593Smuzhiyun 		 sck_way,
2173*4882a593Smuzhiyun 		 ch_way,
2174*4882a593Smuzhiyun 		 offset,
2175*4882a593Smuzhiyun 		 idx,
2176*4882a593Smuzhiyun 		 base_ch,
2177*4882a593Smuzhiyun 		 *channel_mask);
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun 	/* Calculate channel address */
2180*4882a593Smuzhiyun 	/* Remove the TAD offset */
2181*4882a593Smuzhiyun 
2182*4882a593Smuzhiyun 	if (offset > addr) {
2183*4882a593Smuzhiyun 		sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
2184*4882a593Smuzhiyun 			offset, addr);
2185*4882a593Smuzhiyun 		return -EINVAL;
2186*4882a593Smuzhiyun 	}
2187*4882a593Smuzhiyun 
2188*4882a593Smuzhiyun 	ch_addr = addr - offset;
2189*4882a593Smuzhiyun 	ch_addr >>= (6 + shiftup);
2190*4882a593Smuzhiyun 	ch_addr /= sck_xch;
2191*4882a593Smuzhiyun 	ch_addr <<= (6 + shiftup);
2192*4882a593Smuzhiyun 	ch_addr |= addr & ((1 << (6 + shiftup)) - 1);
2193*4882a593Smuzhiyun 
2194*4882a593Smuzhiyun 	/*
2195*4882a593Smuzhiyun 	 * Step 3) Decode rank
2196*4882a593Smuzhiyun 	 */
2197*4882a593Smuzhiyun 	for (n_rir = 0; n_rir < MAX_RIR_RANGES; n_rir++) {
2198*4882a593Smuzhiyun 		pci_read_config_dword(pvt->pci_tad[base_ch], rir_way_limit[n_rir], &reg);
2199*4882a593Smuzhiyun 
2200*4882a593Smuzhiyun 		if (!IS_RIR_VALID(reg))
2201*4882a593Smuzhiyun 			continue;
2202*4882a593Smuzhiyun 
2203*4882a593Smuzhiyun 		limit = pvt->info.rir_limit(reg);
2204*4882a593Smuzhiyun 		gb = div_u64_rem(limit >> 20, 1024, &mb);
2205*4882a593Smuzhiyun 		edac_dbg(0, "RIR#%d, limit: %u.%03u GB (0x%016Lx), way: %d\n",
2206*4882a593Smuzhiyun 			 n_rir,
2207*4882a593Smuzhiyun 			 gb, (mb*1000)/1024,
2208*4882a593Smuzhiyun 			 limit,
2209*4882a593Smuzhiyun 			 1 << RIR_WAY(reg));
2210*4882a593Smuzhiyun 		if  (ch_addr <= limit)
2211*4882a593Smuzhiyun 			break;
2212*4882a593Smuzhiyun 	}
2213*4882a593Smuzhiyun 	if (n_rir == MAX_RIR_RANGES) {
2214*4882a593Smuzhiyun 		sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
2215*4882a593Smuzhiyun 			ch_addr);
2216*4882a593Smuzhiyun 		return -EINVAL;
2217*4882a593Smuzhiyun 	}
2218*4882a593Smuzhiyun 	rir_way = RIR_WAY(reg);
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun 	if (pvt->is_close_pg)
2221*4882a593Smuzhiyun 		idx = (ch_addr >> 6);
2222*4882a593Smuzhiyun 	else
2223*4882a593Smuzhiyun 		idx = (ch_addr >> 13);	/* FIXME: Datasheet says to shift by 15 */
2224*4882a593Smuzhiyun 	idx %= 1 << rir_way;
2225*4882a593Smuzhiyun 
2226*4882a593Smuzhiyun 	pci_read_config_dword(pvt->pci_tad[base_ch], rir_offset[n_rir][idx], &reg);
2227*4882a593Smuzhiyun 	*rank = RIR_RNK_TGT(pvt->info.type, reg);
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun 	edac_dbg(0, "RIR#%d: channel address 0x%08Lx < 0x%08Lx, RIR interleave %d, index %d\n",
2230*4882a593Smuzhiyun 		 n_rir,
2231*4882a593Smuzhiyun 		 ch_addr,
2232*4882a593Smuzhiyun 		 limit,
2233*4882a593Smuzhiyun 		 rir_way,
2234*4882a593Smuzhiyun 		 idx);
2235*4882a593Smuzhiyun 
2236*4882a593Smuzhiyun 	return 0;
2237*4882a593Smuzhiyun }
2238*4882a593Smuzhiyun 
2239*4882a593Smuzhiyun static int get_memory_error_data_from_mce(struct mem_ctl_info *mci,
2240*4882a593Smuzhiyun 					  const struct mce *m, u8 *socket,
2241*4882a593Smuzhiyun 					  u8 *ha, long *channel_mask,
2242*4882a593Smuzhiyun 					  char *msg)
2243*4882a593Smuzhiyun {
2244*4882a593Smuzhiyun 	u32 reg, channel = GET_BITFIELD(m->status, 0, 3);
2245*4882a593Smuzhiyun 	struct mem_ctl_info *new_mci;
2246*4882a593Smuzhiyun 	struct sbridge_pvt *pvt;
2247*4882a593Smuzhiyun 	struct pci_dev *pci_ha;
2248*4882a593Smuzhiyun 	bool tad0;
2249*4882a593Smuzhiyun 
2250*4882a593Smuzhiyun 	if (channel >= NUM_CHANNELS) {
2251*4882a593Smuzhiyun 		sprintf(msg, "Invalid channel 0x%x", channel);
2252*4882a593Smuzhiyun 		return -EINVAL;
2253*4882a593Smuzhiyun 	}
2254*4882a593Smuzhiyun 
2255*4882a593Smuzhiyun 	pvt = mci->pvt_info;
2256*4882a593Smuzhiyun 	if (!pvt->info.get_ha) {
2257*4882a593Smuzhiyun 		sprintf(msg, "No get_ha()");
2258*4882a593Smuzhiyun 		return -EINVAL;
2259*4882a593Smuzhiyun 	}
2260*4882a593Smuzhiyun 	*ha = pvt->info.get_ha(m->bank);
2261*4882a593Smuzhiyun 	if (*ha != 0 && *ha != 1) {
2262*4882a593Smuzhiyun 		sprintf(msg, "Impossible bank %d", m->bank);
2263*4882a593Smuzhiyun 		return -EINVAL;
2264*4882a593Smuzhiyun 	}
2265*4882a593Smuzhiyun 
2266*4882a593Smuzhiyun 	*socket = m->socketid;
2267*4882a593Smuzhiyun 	new_mci = get_mci_for_node_id(*socket, *ha);
2268*4882a593Smuzhiyun 	if (!new_mci) {
2269*4882a593Smuzhiyun 		strcpy(msg, "mci socket got corrupted!");
2270*4882a593Smuzhiyun 		return -EINVAL;
2271*4882a593Smuzhiyun 	}
2272*4882a593Smuzhiyun 
2273*4882a593Smuzhiyun 	pvt = new_mci->pvt_info;
2274*4882a593Smuzhiyun 	pci_ha = pvt->pci_ha;
2275*4882a593Smuzhiyun 	pci_read_config_dword(pci_ha, tad_dram_rule[0], &reg);
2276*4882a593Smuzhiyun 	tad0 = m->addr <= TAD_LIMIT(reg);
2277*4882a593Smuzhiyun 
2278*4882a593Smuzhiyun 	*channel_mask = 1 << channel;
2279*4882a593Smuzhiyun 	if (pvt->mirror_mode == FULL_MIRRORING ||
2280*4882a593Smuzhiyun 	    (pvt->mirror_mode == ADDR_RANGE_MIRRORING && tad0)) {
2281*4882a593Smuzhiyun 		*channel_mask |= 1 << ((channel + 2) % 4);
2282*4882a593Smuzhiyun 		pvt->is_cur_addr_mirrored = true;
2283*4882a593Smuzhiyun 	} else {
2284*4882a593Smuzhiyun 		pvt->is_cur_addr_mirrored = false;
2285*4882a593Smuzhiyun 	}
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	if (pvt->is_lockstep)
2288*4882a593Smuzhiyun 		*channel_mask |= 1 << ((channel + 1) % 4);
2289*4882a593Smuzhiyun 
2290*4882a593Smuzhiyun 	return 0;
2291*4882a593Smuzhiyun }
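/*
 * Worked example of the channel masks built above (illustrative): for an
 * error reported on channel 1 with full mirroring and lockstep both
 * enabled, channel_mask = BIT(1) | BIT(3) | BIT(2) = 0xe, i.e. the error
 * may live on channel 1, its mirror partner channel 3, or its lockstep
 * partner channel 2.
 */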
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun /****************************************************************************
2294*4882a593Smuzhiyun 	Device initialization routines: put/get, init/exit
2295*4882a593Smuzhiyun  ****************************************************************************/
2296*4882a593Smuzhiyun 
2297*4882a593Smuzhiyun /*
2298*4882a593Smuzhiyun  *	sbridge_put_all_devices	'put' all the devices that we have
2299*4882a593Smuzhiyun  *				reserved via 'get'
2300*4882a593Smuzhiyun  */
2301*4882a593Smuzhiyun static void sbridge_put_devices(struct sbridge_dev *sbridge_dev)
2302*4882a593Smuzhiyun {
2303*4882a593Smuzhiyun 	int i;
2304*4882a593Smuzhiyun 
2305*4882a593Smuzhiyun 	edac_dbg(0, "\n");
2306*4882a593Smuzhiyun 	for (i = 0; i < sbridge_dev->n_devs; i++) {
2307*4882a593Smuzhiyun 		struct pci_dev *pdev = sbridge_dev->pdev[i];
2308*4882a593Smuzhiyun 		if (!pdev)
2309*4882a593Smuzhiyun 			continue;
2310*4882a593Smuzhiyun 		edac_dbg(0, "Removing dev %02x:%02x.%d\n",
2311*4882a593Smuzhiyun 			 pdev->bus->number,
2312*4882a593Smuzhiyun 			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
2313*4882a593Smuzhiyun 		pci_dev_put(pdev);
2314*4882a593Smuzhiyun 	}
2315*4882a593Smuzhiyun }
2316*4882a593Smuzhiyun 
2317*4882a593Smuzhiyun static void sbridge_put_all_devices(void)
2318*4882a593Smuzhiyun {
2319*4882a593Smuzhiyun 	struct sbridge_dev *sbridge_dev, *tmp;
2320*4882a593Smuzhiyun 
2321*4882a593Smuzhiyun 	list_for_each_entry_safe(sbridge_dev, tmp, &sbridge_edac_list, list) {
2322*4882a593Smuzhiyun 		sbridge_put_devices(sbridge_dev);
2323*4882a593Smuzhiyun 		free_sbridge_dev(sbridge_dev);
2324*4882a593Smuzhiyun 	}
2325*4882a593Smuzhiyun }
2326*4882a593Smuzhiyun 
2327*4882a593Smuzhiyun static int sbridge_get_onedevice(struct pci_dev **prev,
2328*4882a593Smuzhiyun 				 u8 *num_mc,
2329*4882a593Smuzhiyun 				 const struct pci_id_table *table,
2330*4882a593Smuzhiyun 				 const unsigned devno,
2331*4882a593Smuzhiyun 				 const int multi_bus)
2332*4882a593Smuzhiyun {
2333*4882a593Smuzhiyun 	struct sbridge_dev *sbridge_dev = NULL;
2334*4882a593Smuzhiyun 	const struct pci_id_descr *dev_descr = &table->descr[devno];
2335*4882a593Smuzhiyun 	struct pci_dev *pdev = NULL;
2336*4882a593Smuzhiyun 	int seg = 0;
2337*4882a593Smuzhiyun 	u8 bus = 0;
2338*4882a593Smuzhiyun 	int i = 0;
2339*4882a593Smuzhiyun 
2340*4882a593Smuzhiyun 	sbridge_printk(KERN_DEBUG,
2341*4882a593Smuzhiyun 		"Seeking for: PCI ID %04x:%04x\n",
2342*4882a593Smuzhiyun 		PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun 	pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
2345*4882a593Smuzhiyun 			      dev_descr->dev_id, *prev);
2346*4882a593Smuzhiyun 
2347*4882a593Smuzhiyun 	if (!pdev) {
2348*4882a593Smuzhiyun 		if (*prev) {
2349*4882a593Smuzhiyun 			*prev = pdev;
2350*4882a593Smuzhiyun 			return 0;
2351*4882a593Smuzhiyun 		}
2352*4882a593Smuzhiyun 
2353*4882a593Smuzhiyun 		if (dev_descr->optional)
2354*4882a593Smuzhiyun 			return 0;
2355*4882a593Smuzhiyun 
2356*4882a593Smuzhiyun 		/* if the HA wasn't found */
2357*4882a593Smuzhiyun 		if (devno == 0)
2358*4882a593Smuzhiyun 			return -ENODEV;
2359*4882a593Smuzhiyun 
2360*4882a593Smuzhiyun 		sbridge_printk(KERN_INFO,
2361*4882a593Smuzhiyun 			"Device not found: %04x:%04x\n",
2362*4882a593Smuzhiyun 			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2363*4882a593Smuzhiyun 
2364*4882a593Smuzhiyun 		/* End of list, leave */
2365*4882a593Smuzhiyun 		return -ENODEV;
2366*4882a593Smuzhiyun 	}
2367*4882a593Smuzhiyun 	seg = pci_domain_nr(pdev->bus);
2368*4882a593Smuzhiyun 	bus = pdev->bus->number;
2369*4882a593Smuzhiyun 
2370*4882a593Smuzhiyun next_imc:
2371*4882a593Smuzhiyun 	sbridge_dev = get_sbridge_dev(seg, bus, dev_descr->dom,
2372*4882a593Smuzhiyun 				      multi_bus, sbridge_dev);
2373*4882a593Smuzhiyun 	if (!sbridge_dev) {
2374*4882a593Smuzhiyun 		/* If HA1 wasn't found, don't create a second EDAC memory controller */
2375*4882a593Smuzhiyun 		if (dev_descr->dom == IMC1 && devno != 1) {
2376*4882a593Smuzhiyun 			edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n",
2377*4882a593Smuzhiyun 				 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2378*4882a593Smuzhiyun 			pci_dev_put(pdev);
2379*4882a593Smuzhiyun 			return 0;
2380*4882a593Smuzhiyun 		}
2381*4882a593Smuzhiyun 
2382*4882a593Smuzhiyun 		if (dev_descr->dom == SOCK)
2383*4882a593Smuzhiyun 			goto out_imc;
2384*4882a593Smuzhiyun 
2385*4882a593Smuzhiyun 		sbridge_dev = alloc_sbridge_dev(seg, bus, dev_descr->dom, table);
2386*4882a593Smuzhiyun 		if (!sbridge_dev) {
2387*4882a593Smuzhiyun 			pci_dev_put(pdev);
2388*4882a593Smuzhiyun 			return -ENOMEM;
2389*4882a593Smuzhiyun 		}
2390*4882a593Smuzhiyun 		(*num_mc)++;
2391*4882a593Smuzhiyun 	}
2392*4882a593Smuzhiyun 
2393*4882a593Smuzhiyun 	if (sbridge_dev->pdev[sbridge_dev->i_devs]) {
2394*4882a593Smuzhiyun 		sbridge_printk(KERN_ERR,
2395*4882a593Smuzhiyun 			"Duplicated device for %04x:%04x\n",
2396*4882a593Smuzhiyun 			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2397*4882a593Smuzhiyun 		pci_dev_put(pdev);
2398*4882a593Smuzhiyun 		return -ENODEV;
2399*4882a593Smuzhiyun 	}
2400*4882a593Smuzhiyun 
2401*4882a593Smuzhiyun 	sbridge_dev->pdev[sbridge_dev->i_devs++] = pdev;
2402*4882a593Smuzhiyun 
2403*4882a593Smuzhiyun 	/* pdev belongs to more than one IMC, do extra gets */
2404*4882a593Smuzhiyun 	if (++i > 1)
2405*4882a593Smuzhiyun 		pci_dev_get(pdev);
2406*4882a593Smuzhiyun 
2407*4882a593Smuzhiyun 	if (dev_descr->dom == SOCK && i < table->n_imcs_per_sock)
2408*4882a593Smuzhiyun 		goto next_imc;
2409*4882a593Smuzhiyun 
2410*4882a593Smuzhiyun out_imc:
2411*4882a593Smuzhiyun 	/* Be sure that the device is enabled */
2412*4882a593Smuzhiyun 	if (unlikely(pci_enable_device(pdev) < 0)) {
2413*4882a593Smuzhiyun 		sbridge_printk(KERN_ERR,
2414*4882a593Smuzhiyun 			"Couldn't enable %04x:%04x\n",
2415*4882a593Smuzhiyun 			PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2416*4882a593Smuzhiyun 		return -ENODEV;
2417*4882a593Smuzhiyun 	}
2418*4882a593Smuzhiyun 
2419*4882a593Smuzhiyun 	edac_dbg(0, "Detected %04x:%04x\n",
2420*4882a593Smuzhiyun 		 PCI_VENDOR_ID_INTEL, dev_descr->dev_id);
2421*4882a593Smuzhiyun 
2422*4882a593Smuzhiyun 	/*
2423*4882a593Smuzhiyun 	 * As stated in drivers/pci/search.c, the reference count for
2424*4882a593Smuzhiyun 	 * @from is always decremented if it is not %NULL. So, as we need
2425*4882a593Smuzhiyun 	 * to walk all devices up to NULL, we need an extra get on the device.
2426*4882a593Smuzhiyun 	 */
2427*4882a593Smuzhiyun 	pci_dev_get(pdev);
2428*4882a593Smuzhiyun 
2429*4882a593Smuzhiyun 	*prev = pdev;
2430*4882a593Smuzhiyun 
2431*4882a593Smuzhiyun 	return 0;
2432*4882a593Smuzhiyun }
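/*
 * A minimal sketch (not part of the driver) of the pci_get_device()
 * cursor pattern that sbridge_get_onedevice() relies on: each call drops
 * the reference on the cursor argument and returns the next matching
 * device with a fresh reference held, so walking to the NULL terminator
 * consumes one reference per step (dev_id below is a placeholder for the
 * device ID being sought):
 *
 *	struct pci_dev *pdev = NULL;
 *
 *	while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, dev_id, pdev))) {
 *		// pdev holds a live reference here
 *	}
 */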
2433*4882a593Smuzhiyun 
2434*4882a593Smuzhiyun /*
2435*4882a593Smuzhiyun  * sbridge_get_all_devices - Find and perform 'get' operation on the MCH's
2436*4882a593Smuzhiyun  *			     devices we want to reference for this driver.
2437*4882a593Smuzhiyun  * @num_mc: pointer to the memory controllers count, to be incremented in case
2438*4882a593Smuzhiyun  *	    of success.
2439*4882a593Smuzhiyun  * @table: model specific table
2440*4882a593Smuzhiyun  *
2441*4882a593Smuzhiyun  * Returns 0 in case of success, or an error code otherwise.
2442*4882a593Smuzhiyun  */
2443*4882a593Smuzhiyun static int sbridge_get_all_devices(u8 *num_mc,
2444*4882a593Smuzhiyun 					const struct pci_id_table *table)
2445*4882a593Smuzhiyun {
2446*4882a593Smuzhiyun 	int i, rc;
2447*4882a593Smuzhiyun 	struct pci_dev *pdev = NULL;
2448*4882a593Smuzhiyun 	int allow_dups = 0;
2449*4882a593Smuzhiyun 	int multi_bus = 0;
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 	if (table->type == KNIGHTS_LANDING)
2452*4882a593Smuzhiyun 		allow_dups = multi_bus = 1;
2453*4882a593Smuzhiyun 	while (table && table->descr) {
2454*4882a593Smuzhiyun 		for (i = 0; i < table->n_devs_per_sock; i++) {
2455*4882a593Smuzhiyun 			if (!allow_dups || i == 0 ||
2456*4882a593Smuzhiyun 					table->descr[i].dev_id !=
2457*4882a593Smuzhiyun 						table->descr[i-1].dev_id) {
2458*4882a593Smuzhiyun 				pdev = NULL;
2459*4882a593Smuzhiyun 			}
2460*4882a593Smuzhiyun 			do {
2461*4882a593Smuzhiyun 				rc = sbridge_get_onedevice(&pdev, num_mc,
2462*4882a593Smuzhiyun 							   table, i, multi_bus);
2463*4882a593Smuzhiyun 				if (rc < 0) {
2464*4882a593Smuzhiyun 					if (i == 0) {
2465*4882a593Smuzhiyun 						i = table->n_devs_per_sock;
2466*4882a593Smuzhiyun 						break;
2467*4882a593Smuzhiyun 					}
2468*4882a593Smuzhiyun 					sbridge_put_all_devices();
2469*4882a593Smuzhiyun 					return -ENODEV;
2470*4882a593Smuzhiyun 				}
2471*4882a593Smuzhiyun 			} while (pdev && !allow_dups);
2472*4882a593Smuzhiyun 		}
2473*4882a593Smuzhiyun 		table++;
2474*4882a593Smuzhiyun 	}
2475*4882a593Smuzhiyun 
2476*4882a593Smuzhiyun 	return 0;
2477*4882a593Smuzhiyun }
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun /*
2480*4882a593Smuzhiyun  * Device IDs for {SBRIDGE,IBRIDGE,HASWELL,BROADWELL}_IMC_HA0_TAD0 are in
2481*4882a593Smuzhiyun  * the format: XXXa. So we can convert from a device to the corresponding
2482*4882a593Smuzhiyun  * channel like this
2483*4882a593Smuzhiyun  */
2484*4882a593Smuzhiyun #define TAD_DEV_TO_CHAN(dev) (((dev) & 0xf) - 0xa)
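/*
 * For instance, a TAD device ID ending in 0xa decodes to channel 0,
 * 0xb to channel 1, 0xc to channel 2 and 0xd to channel 3.
 */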
2485*4882a593Smuzhiyun 
2486*4882a593Smuzhiyun static int sbridge_mci_bind_devs(struct mem_ctl_info *mci,
2487*4882a593Smuzhiyun 				 struct sbridge_dev *sbridge_dev)
2488*4882a593Smuzhiyun {
2489*4882a593Smuzhiyun 	struct sbridge_pvt *pvt = mci->pvt_info;
2490*4882a593Smuzhiyun 	struct pci_dev *pdev;
2491*4882a593Smuzhiyun 	u8 saw_chan_mask = 0;
2492*4882a593Smuzhiyun 	int i;
2493*4882a593Smuzhiyun 
2494*4882a593Smuzhiyun 	for (i = 0; i < sbridge_dev->n_devs; i++) {
2495*4882a593Smuzhiyun 		pdev = sbridge_dev->pdev[i];
2496*4882a593Smuzhiyun 		if (!pdev)
2497*4882a593Smuzhiyun 			continue;
2498*4882a593Smuzhiyun 
2499*4882a593Smuzhiyun 		switch (pdev->device) {
2500*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0:
2501*4882a593Smuzhiyun 			pvt->pci_sad0 = pdev;
2502*4882a593Smuzhiyun 			break;
2503*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1:
2504*4882a593Smuzhiyun 			pvt->pci_sad1 = pdev;
2505*4882a593Smuzhiyun 			break;
2506*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_SBRIDGE_BR:
2507*4882a593Smuzhiyun 			pvt->pci_br0 = pdev;
2508*4882a593Smuzhiyun 			break;
2509*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_HA0:
2510*4882a593Smuzhiyun 			pvt->pci_ha = pdev;
2511*4882a593Smuzhiyun 			break;
2512*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TA:
2513*4882a593Smuzhiyun 			pvt->pci_ta = pdev;
2514*4882a593Smuzhiyun 			break;
2515*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS:
2516*4882a593Smuzhiyun 			pvt->pci_ras = pdev;
2517*4882a593Smuzhiyun 			break;
2518*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD0:
2519*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1:
2520*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2:
2521*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3:
2522*4882a593Smuzhiyun 		{
2523*4882a593Smuzhiyun 			int id = TAD_DEV_TO_CHAN(pdev->device);
2524*4882a593Smuzhiyun 			pvt->pci_tad[id] = pdev;
2525*4882a593Smuzhiyun 			saw_chan_mask |= 1 << id;
2526*4882a593Smuzhiyun 		}
2527*4882a593Smuzhiyun 			break;
2528*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO:
2529*4882a593Smuzhiyun 			pvt->pci_ddrio = pdev;
2530*4882a593Smuzhiyun 			break;
2531*4882a593Smuzhiyun 		default:
2532*4882a593Smuzhiyun 			goto error;
2533*4882a593Smuzhiyun 		}
2534*4882a593Smuzhiyun 
2535*4882a593Smuzhiyun 		edac_dbg(0, "Associated PCI %02x:%02x, bus %d with dev = %p\n",
2536*4882a593Smuzhiyun 			 pdev->vendor, pdev->device,
2537*4882a593Smuzhiyun 			 sbridge_dev->bus,
2538*4882a593Smuzhiyun 			 pdev);
2539*4882a593Smuzhiyun 	}
2540*4882a593Smuzhiyun 
2541*4882a593Smuzhiyun 	/* Check if everything was registered */
2542*4882a593Smuzhiyun 	if (!pvt->pci_sad0 || !pvt->pci_sad1 || !pvt->pci_ha ||
2543*4882a593Smuzhiyun 	    !pvt->pci_ras || !pvt->pci_ta)
2544*4882a593Smuzhiyun 		goto enodev;
2545*4882a593Smuzhiyun 
2546*4882a593Smuzhiyun 	if (saw_chan_mask != 0x0f)
2547*4882a593Smuzhiyun 		goto enodev;
2548*4882a593Smuzhiyun 	return 0;
2549*4882a593Smuzhiyun 
2550*4882a593Smuzhiyun enodev:
2551*4882a593Smuzhiyun 	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2552*4882a593Smuzhiyun 	return -ENODEV;
2553*4882a593Smuzhiyun 
2554*4882a593Smuzhiyun error:
2555*4882a593Smuzhiyun 	sbridge_printk(KERN_ERR, "Unexpected device %02x:%02x\n",
2556*4882a593Smuzhiyun 		       PCI_VENDOR_ID_INTEL, pdev->device);
2557*4882a593Smuzhiyun 	return -EINVAL;
2558*4882a593Smuzhiyun }
2559*4882a593Smuzhiyun 
2560*4882a593Smuzhiyun static int ibridge_mci_bind_devs(struct mem_ctl_info *mci,
2561*4882a593Smuzhiyun 				 struct sbridge_dev *sbridge_dev)
2562*4882a593Smuzhiyun {
2563*4882a593Smuzhiyun 	struct sbridge_pvt *pvt = mci->pvt_info;
2564*4882a593Smuzhiyun 	struct pci_dev *pdev;
2565*4882a593Smuzhiyun 	u8 saw_chan_mask = 0;
2566*4882a593Smuzhiyun 	int i;
2567*4882a593Smuzhiyun 
2568*4882a593Smuzhiyun 	for (i = 0; i < sbridge_dev->n_devs; i++) {
2569*4882a593Smuzhiyun 		pdev = sbridge_dev->pdev[i];
2570*4882a593Smuzhiyun 		if (!pdev)
2571*4882a593Smuzhiyun 			continue;
2572*4882a593Smuzhiyun 
2573*4882a593Smuzhiyun 		switch (pdev->device) {
2574*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0:
2575*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1:
2576*4882a593Smuzhiyun 			pvt->pci_ha = pdev;
2577*4882a593Smuzhiyun 			break;
2578*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA:
2579*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA:
2580*4882a593Smuzhiyun 			pvt->pci_ta = pdev;
2581*4882a593Smuzhiyun 			break;
2582*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS:
2583*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS:
2584*4882a593Smuzhiyun 			pvt->pci_ras = pdev;
2585*4882a593Smuzhiyun 			break;
2586*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD0:
2587*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD1:
2588*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD2:
2589*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3:
2590*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0:
2591*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD1:
2592*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD2:
2593*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD3:
2594*4882a593Smuzhiyun 		{
2595*4882a593Smuzhiyun 			int id = TAD_DEV_TO_CHAN(pdev->device);
2596*4882a593Smuzhiyun 			pvt->pci_tad[id] = pdev;
2597*4882a593Smuzhiyun 			saw_chan_mask |= 1 << id;
2598*4882a593Smuzhiyun 		}
2599*4882a593Smuzhiyun 			break;
2600*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_2HA_DDRIO0:
2601*4882a593Smuzhiyun 			pvt->pci_ddrio = pdev;
2602*4882a593Smuzhiyun 			break;
2603*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_1HA_DDRIO0:
2604*4882a593Smuzhiyun 			pvt->pci_ddrio = pdev;
2605*4882a593Smuzhiyun 			break;
2606*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_SAD:
2607*4882a593Smuzhiyun 			pvt->pci_sad0 = pdev;
2608*4882a593Smuzhiyun 			break;
2609*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR0:
2610*4882a593Smuzhiyun 			pvt->pci_br0 = pdev;
2611*4882a593Smuzhiyun 			break;
2612*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_IBRIDGE_BR1:
2613*4882a593Smuzhiyun 			pvt->pci_br1 = pdev;
2614*4882a593Smuzhiyun 			break;
2615*4882a593Smuzhiyun 		default:
2616*4882a593Smuzhiyun 			goto error;
2617*4882a593Smuzhiyun 		}
2618*4882a593Smuzhiyun 
2619*4882a593Smuzhiyun 		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2620*4882a593Smuzhiyun 			 sbridge_dev->bus,
2621*4882a593Smuzhiyun 			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2622*4882a593Smuzhiyun 			 pdev);
2623*4882a593Smuzhiyun 	}
2624*4882a593Smuzhiyun 
2625*4882a593Smuzhiyun 	/* Check if everything was registered */
2626*4882a593Smuzhiyun 	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_br0 ||
2627*4882a593Smuzhiyun 	    !pvt->pci_br1 || !pvt->pci_ras || !pvt->pci_ta)
2628*4882a593Smuzhiyun 		goto enodev;
2629*4882a593Smuzhiyun 
2630*4882a593Smuzhiyun 	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2631*4882a593Smuzhiyun 	    saw_chan_mask != 0x03)   /* -EP */
2632*4882a593Smuzhiyun 		goto enodev;
2633*4882a593Smuzhiyun 	return 0;
2634*4882a593Smuzhiyun 
2635*4882a593Smuzhiyun enodev:
2636*4882a593Smuzhiyun 	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2637*4882a593Smuzhiyun 	return -ENODEV;
2638*4882a593Smuzhiyun 
2639*4882a593Smuzhiyun error:
2640*4882a593Smuzhiyun 	sbridge_printk(KERN_ERR,
2641*4882a593Smuzhiyun 		       "Unexpected device %02x:%02x\n", PCI_VENDOR_ID_INTEL,
2642*4882a593Smuzhiyun 			pdev->device);
2643*4882a593Smuzhiyun 	return -EINVAL;
2644*4882a593Smuzhiyun }
2645*4882a593Smuzhiyun 
2646*4882a593Smuzhiyun static int haswell_mci_bind_devs(struct mem_ctl_info *mci,
2647*4882a593Smuzhiyun 				 struct sbridge_dev *sbridge_dev)
2648*4882a593Smuzhiyun {
2649*4882a593Smuzhiyun 	struct sbridge_pvt *pvt = mci->pvt_info;
2650*4882a593Smuzhiyun 	struct pci_dev *pdev;
2651*4882a593Smuzhiyun 	u8 saw_chan_mask = 0;
2652*4882a593Smuzhiyun 	int i;
2653*4882a593Smuzhiyun 
2654*4882a593Smuzhiyun 	/* there's only one device per system; not tied to any bus */
2655*4882a593Smuzhiyun 	if (pvt->info.pci_vtd == NULL)
2656*4882a593Smuzhiyun 		/* result will be checked later */
2657*4882a593Smuzhiyun 		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2658*4882a593Smuzhiyun 						   PCI_DEVICE_ID_INTEL_HASWELL_IMC_VTD_MISC,
2659*4882a593Smuzhiyun 						   NULL);
2660*4882a593Smuzhiyun 
2661*4882a593Smuzhiyun 	for (i = 0; i < sbridge_dev->n_devs; i++) {
2662*4882a593Smuzhiyun 		pdev = sbridge_dev->pdev[i];
2663*4882a593Smuzhiyun 		if (!pdev)
2664*4882a593Smuzhiyun 			continue;
2665*4882a593Smuzhiyun 
2666*4882a593Smuzhiyun 		switch (pdev->device) {
2667*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD0:
2668*4882a593Smuzhiyun 			pvt->pci_sad0 = pdev;
2669*4882a593Smuzhiyun 			break;
2670*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_CBO_SAD1:
2671*4882a593Smuzhiyun 			pvt->pci_sad1 = pdev;
2672*4882a593Smuzhiyun 			break;
2673*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0:
2674*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1:
2675*4882a593Smuzhiyun 			pvt->pci_ha = pdev;
2676*4882a593Smuzhiyun 			break;
2677*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TA:
2678*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TA:
2679*4882a593Smuzhiyun 			pvt->pci_ta = pdev;
2680*4882a593Smuzhiyun 			break;
2681*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TM:
2682*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TM:
2683*4882a593Smuzhiyun 			pvt->pci_ras = pdev;
2684*4882a593Smuzhiyun 			break;
2685*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD0:
2686*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD1:
2687*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD2:
2688*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA0_TAD3:
2689*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD0:
2690*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD1:
2691*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD2:
2692*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_HA1_TAD3:
2693*4882a593Smuzhiyun 		{
2694*4882a593Smuzhiyun 			int id = TAD_DEV_TO_CHAN(pdev->device);
2695*4882a593Smuzhiyun 			pvt->pci_tad[id] = pdev;
2696*4882a593Smuzhiyun 			saw_chan_mask |= 1 << id;
2697*4882a593Smuzhiyun 		}
2698*4882a593Smuzhiyun 			break;
2699*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO0:
2700*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO1:
2701*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO2:
2702*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_HASWELL_IMC_DDRIO3:
2703*4882a593Smuzhiyun 			if (!pvt->pci_ddrio)
2704*4882a593Smuzhiyun 				pvt->pci_ddrio = pdev;
2705*4882a593Smuzhiyun 			break;
2706*4882a593Smuzhiyun 		default:
2707*4882a593Smuzhiyun 			break;
2708*4882a593Smuzhiyun 		}
2709*4882a593Smuzhiyun 
2710*4882a593Smuzhiyun 		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2711*4882a593Smuzhiyun 			 sbridge_dev->bus,
2712*4882a593Smuzhiyun 			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2713*4882a593Smuzhiyun 			 pdev);
2714*4882a593Smuzhiyun 	}
2715*4882a593Smuzhiyun 
2716*4882a593Smuzhiyun 	/* Check if everything was registered */
2717*4882a593Smuzhiyun 	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2718*4882a593Smuzhiyun 	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2719*4882a593Smuzhiyun 		goto enodev;
2720*4882a593Smuzhiyun 
2721*4882a593Smuzhiyun 	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2722*4882a593Smuzhiyun 	    saw_chan_mask != 0x03)   /* -EP */
2723*4882a593Smuzhiyun 		goto enodev;
2724*4882a593Smuzhiyun 	return 0;
2725*4882a593Smuzhiyun 
2726*4882a593Smuzhiyun enodev:
2727*4882a593Smuzhiyun 	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2728*4882a593Smuzhiyun 	return -ENODEV;
2729*4882a593Smuzhiyun }
2730*4882a593Smuzhiyun 
2731*4882a593Smuzhiyun static int broadwell_mci_bind_devs(struct mem_ctl_info *mci,
2732*4882a593Smuzhiyun 				 struct sbridge_dev *sbridge_dev)
2733*4882a593Smuzhiyun {
2734*4882a593Smuzhiyun 	struct sbridge_pvt *pvt = mci->pvt_info;
2735*4882a593Smuzhiyun 	struct pci_dev *pdev;
2736*4882a593Smuzhiyun 	u8 saw_chan_mask = 0;
2737*4882a593Smuzhiyun 	int i;
2738*4882a593Smuzhiyun 
2739*4882a593Smuzhiyun 	/* there's only one device per system; not tied to any bus */
2740*4882a593Smuzhiyun 	if (pvt->info.pci_vtd == NULL)
2741*4882a593Smuzhiyun 		/* result will be checked later */
2742*4882a593Smuzhiyun 		pvt->info.pci_vtd = pci_get_device(PCI_VENDOR_ID_INTEL,
2743*4882a593Smuzhiyun 						   PCI_DEVICE_ID_INTEL_BROADWELL_IMC_VTD_MISC,
2744*4882a593Smuzhiyun 						   NULL);
2745*4882a593Smuzhiyun 
2746*4882a593Smuzhiyun 	for (i = 0; i < sbridge_dev->n_devs; i++) {
2747*4882a593Smuzhiyun 		pdev = sbridge_dev->pdev[i];
2748*4882a593Smuzhiyun 		if (!pdev)
2749*4882a593Smuzhiyun 			continue;
2750*4882a593Smuzhiyun 
2751*4882a593Smuzhiyun 		switch (pdev->device) {
2752*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD0:
2753*4882a593Smuzhiyun 			pvt->pci_sad0 = pdev;
2754*4882a593Smuzhiyun 			break;
2755*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_CBO_SAD1:
2756*4882a593Smuzhiyun 			pvt->pci_sad1 = pdev;
2757*4882a593Smuzhiyun 			break;
2758*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0:
2759*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1:
2760*4882a593Smuzhiyun 			pvt->pci_ha = pdev;
2761*4882a593Smuzhiyun 			break;
2762*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TA:
2763*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TA:
2764*4882a593Smuzhiyun 			pvt->pci_ta = pdev;
2765*4882a593Smuzhiyun 			break;
2766*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TM:
2767*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TM:
2768*4882a593Smuzhiyun 			pvt->pci_ras = pdev;
2769*4882a593Smuzhiyun 			break;
2770*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD0:
2771*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD1:
2772*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD2:
2773*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA0_TAD3:
2774*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD0:
2775*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD1:
2776*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD2:
2777*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_HA1_TAD3:
2778*4882a593Smuzhiyun 		{
2779*4882a593Smuzhiyun 			int id = TAD_DEV_TO_CHAN(pdev->device);
2780*4882a593Smuzhiyun 			pvt->pci_tad[id] = pdev;
2781*4882a593Smuzhiyun 			saw_chan_mask |= 1 << id;
2782*4882a593Smuzhiyun 		}
2783*4882a593Smuzhiyun 			break;
2784*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_BROADWELL_IMC_DDRIO0:
2785*4882a593Smuzhiyun 			pvt->pci_ddrio = pdev;
2786*4882a593Smuzhiyun 			break;
2787*4882a593Smuzhiyun 		default:
2788*4882a593Smuzhiyun 			break;
2789*4882a593Smuzhiyun 		}
2790*4882a593Smuzhiyun 
2791*4882a593Smuzhiyun 		edac_dbg(0, "Associated PCI %02x.%02d.%d with dev = %p\n",
2792*4882a593Smuzhiyun 			 sbridge_dev->bus,
2793*4882a593Smuzhiyun 			 PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
2794*4882a593Smuzhiyun 			 pdev);
2795*4882a593Smuzhiyun 	}
2796*4882a593Smuzhiyun 
2797*4882a593Smuzhiyun 	/* Check if everything was registered */
2798*4882a593Smuzhiyun 	if (!pvt->pci_sad0 || !pvt->pci_ha || !pvt->pci_sad1 ||
2799*4882a593Smuzhiyun 	    !pvt->pci_ras  || !pvt->pci_ta || !pvt->info.pci_vtd)
2800*4882a593Smuzhiyun 		goto enodev;
2801*4882a593Smuzhiyun 
2802*4882a593Smuzhiyun 	if (saw_chan_mask != 0x0f && /* -EN/-EX */
2803*4882a593Smuzhiyun 	    saw_chan_mask != 0x03)   /* -EP */
2804*4882a593Smuzhiyun 		goto enodev;
2805*4882a593Smuzhiyun 	return 0;
2806*4882a593Smuzhiyun 
2807*4882a593Smuzhiyun enodev:
2808*4882a593Smuzhiyun 	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2809*4882a593Smuzhiyun 	return -ENODEV;
2810*4882a593Smuzhiyun }
2811*4882a593Smuzhiyun 
2812*4882a593Smuzhiyun static int knl_mci_bind_devs(struct mem_ctl_info *mci,
2813*4882a593Smuzhiyun 			struct sbridge_dev *sbridge_dev)
2814*4882a593Smuzhiyun {
2815*4882a593Smuzhiyun 	struct sbridge_pvt *pvt = mci->pvt_info;
2816*4882a593Smuzhiyun 	struct pci_dev *pdev;
2817*4882a593Smuzhiyun 	int dev, func;
2818*4882a593Smuzhiyun 
2819*4882a593Smuzhiyun 	int i;
2820*4882a593Smuzhiyun 	int devidx;
2821*4882a593Smuzhiyun 
2822*4882a593Smuzhiyun 	for (i = 0; i < sbridge_dev->n_devs; i++) {
2823*4882a593Smuzhiyun 		pdev = sbridge_dev->pdev[i];
2824*4882a593Smuzhiyun 		if (!pdev)
2825*4882a593Smuzhiyun 			continue;
2826*4882a593Smuzhiyun 
2827*4882a593Smuzhiyun 		/* Extract PCI device and function. */
2828*4882a593Smuzhiyun 		dev = (pdev->devfn >> 3) & 0x1f;
2829*4882a593Smuzhiyun 		func = pdev->devfn & 0x7;
2830*4882a593Smuzhiyun 
2831*4882a593Smuzhiyun 		switch (pdev->device) {
2832*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_KNL_IMC_MC:
2833*4882a593Smuzhiyun 			if (dev == 8)
2834*4882a593Smuzhiyun 				pvt->knl.pci_mc0 = pdev;
2835*4882a593Smuzhiyun 			else if (dev == 9)
2836*4882a593Smuzhiyun 				pvt->knl.pci_mc1 = pdev;
2837*4882a593Smuzhiyun 			else {
2838*4882a593Smuzhiyun 				sbridge_printk(KERN_ERR,
2839*4882a593Smuzhiyun 					"Memory controller in unexpected place! (dev %d, fn %d)\n",
2840*4882a593Smuzhiyun 					dev, func);
2841*4882a593Smuzhiyun 				continue;
2842*4882a593Smuzhiyun 			}
2843*4882a593Smuzhiyun 			break;
2844*4882a593Smuzhiyun 
2845*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD0:
2846*4882a593Smuzhiyun 			pvt->pci_sad0 = pdev;
2847*4882a593Smuzhiyun 			break;
2848*4882a593Smuzhiyun 
2849*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_KNL_IMC_SAD1:
2850*4882a593Smuzhiyun 			pvt->pci_sad1 = pdev;
2851*4882a593Smuzhiyun 			break;
2852*4882a593Smuzhiyun 
2853*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHA:
2854*4882a593Smuzhiyun 			/* There is one of these per tile, and they range from
2855*4882a593Smuzhiyun 			 * 1.14.0 to 1.18.5.
2856*4882a593Smuzhiyun 			 */
2857*4882a593Smuzhiyun 			devidx = ((dev-14)*8)+func;
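			/*
			 * e.g. 1.14.0 decodes to devidx 0 and 1.18.5 to
			 * devidx 37, the highest valid index.
			 */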
2858*4882a593Smuzhiyun 
2859*4882a593Smuzhiyun 			if (devidx < 0 || devidx >= KNL_MAX_CHAS) {
2860*4882a593Smuzhiyun 				sbridge_printk(KERN_ERR,
2861*4882a593Smuzhiyun 					"Caching and Home Agent in unexpected place! (dev %d, fn %d)\n",
2862*4882a593Smuzhiyun 					dev, func);
2863*4882a593Smuzhiyun 				continue;
2864*4882a593Smuzhiyun 			}
2865*4882a593Smuzhiyun 
2866*4882a593Smuzhiyun 			WARN_ON(pvt->knl.pci_cha[devidx] != NULL);
2867*4882a593Smuzhiyun 
2868*4882a593Smuzhiyun 			pvt->knl.pci_cha[devidx] = pdev;
2869*4882a593Smuzhiyun 			break;
2870*4882a593Smuzhiyun 
2871*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_KNL_IMC_CHAN:
2872*4882a593Smuzhiyun 			devidx = -1;
2873*4882a593Smuzhiyun 
2874*4882a593Smuzhiyun 			/*
2875*4882a593Smuzhiyun 			 *  MC0 channels 0-2 are device 9 function 2-4,
2876*4882a593Smuzhiyun 			 *  MC1 channels 3-5 are device 8 function 2-4.
2877*4882a593Smuzhiyun 			 */
2878*4882a593Smuzhiyun 
2879*4882a593Smuzhiyun 			if (dev == 9)
2880*4882a593Smuzhiyun 				devidx = func-2;
2881*4882a593Smuzhiyun 			else if (dev == 8)
2882*4882a593Smuzhiyun 				devidx = 3 + (func-2);
2883*4882a593Smuzhiyun 
2884*4882a593Smuzhiyun 			if (devidx < 0 || devidx >= KNL_MAX_CHANNELS) {
2885*4882a593Smuzhiyun 				sbridge_printk(KERN_ERR,
2886*4882a593Smuzhiyun 					"DRAM Channel Registers in unexpected place! (dev %d, fn %d)\n",
2887*4882a593Smuzhiyun 					dev, func);
2888*4882a593Smuzhiyun 				continue;
2889*4882a593Smuzhiyun 			}
2890*4882a593Smuzhiyun 
2891*4882a593Smuzhiyun 			WARN_ON(pvt->knl.pci_channel[devidx] != NULL);
2892*4882a593Smuzhiyun 			pvt->knl.pci_channel[devidx] = pdev;
2893*4882a593Smuzhiyun 			break;
2894*4882a593Smuzhiyun 
2895*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_KNL_IMC_TOLHM:
2896*4882a593Smuzhiyun 			pvt->knl.pci_mc_info = pdev;
2897*4882a593Smuzhiyun 			break;
2898*4882a593Smuzhiyun 
2899*4882a593Smuzhiyun 		case PCI_DEVICE_ID_INTEL_KNL_IMC_TA:
2900*4882a593Smuzhiyun 			pvt->pci_ta = pdev;
2901*4882a593Smuzhiyun 			break;
2902*4882a593Smuzhiyun 
2903*4882a593Smuzhiyun 		default:
2904*4882a593Smuzhiyun 			sbridge_printk(KERN_ERR, "Unexpected device %d\n",
2905*4882a593Smuzhiyun 				pdev->device);
2906*4882a593Smuzhiyun 			break;
2907*4882a593Smuzhiyun 		}
2908*4882a593Smuzhiyun 	}
2909*4882a593Smuzhiyun 
2910*4882a593Smuzhiyun 	if (!pvt->knl.pci_mc0  || !pvt->knl.pci_mc1 ||
2911*4882a593Smuzhiyun 	    !pvt->pci_sad0     || !pvt->pci_sad1    ||
2912*4882a593Smuzhiyun 	    !pvt->pci_ta) {
2913*4882a593Smuzhiyun 		goto enodev;
2914*4882a593Smuzhiyun 	}
2915*4882a593Smuzhiyun 
2916*4882a593Smuzhiyun 	for (i = 0; i < KNL_MAX_CHANNELS; i++) {
2917*4882a593Smuzhiyun 		if (!pvt->knl.pci_channel[i]) {
2918*4882a593Smuzhiyun 			sbridge_printk(KERN_ERR, "Missing channel %d\n", i);
2919*4882a593Smuzhiyun 			goto enodev;
2920*4882a593Smuzhiyun 		}
2921*4882a593Smuzhiyun 	}
2922*4882a593Smuzhiyun 
2923*4882a593Smuzhiyun 	for (i = 0; i < KNL_MAX_CHAS; i++) {
2924*4882a593Smuzhiyun 		if (!pvt->knl.pci_cha[i]) {
2925*4882a593Smuzhiyun 			sbridge_printk(KERN_ERR, "Missing CHA %d\n", i);
2926*4882a593Smuzhiyun 			goto enodev;
2927*4882a593Smuzhiyun 		}
2928*4882a593Smuzhiyun 	}
2929*4882a593Smuzhiyun 
2930*4882a593Smuzhiyun 	return 0;
2931*4882a593Smuzhiyun 
2932*4882a593Smuzhiyun enodev:
2933*4882a593Smuzhiyun 	sbridge_printk(KERN_ERR, "Some needed devices are missing\n");
2934*4882a593Smuzhiyun 	return -ENODEV;
2935*4882a593Smuzhiyun }
2936*4882a593Smuzhiyun 
2937*4882a593Smuzhiyun /****************************************************************************
2938*4882a593Smuzhiyun 			Error check routines
2939*4882a593Smuzhiyun  ****************************************************************************/
2940*4882a593Smuzhiyun 
2941*4882a593Smuzhiyun /*
2942*4882a593Smuzhiyun  * While Sandy Bridge has error count registers, the SMI BIOS reads values
2943*4882a593Smuzhiyun  * from them and resets the counters, so they are not reliable for the OS
2944*4882a593Smuzhiyun  * to read. Thus, we have no option but to just trust whatever the MCE is
2945*4882a593Smuzhiyun  * telling us about the errors.
2946*4882a593Smuzhiyun  */
2947*4882a593Smuzhiyun static void sbridge_mce_output_error(struct mem_ctl_info *mci,
2948*4882a593Smuzhiyun 				    const struct mce *m)
2949*4882a593Smuzhiyun {
2950*4882a593Smuzhiyun 	struct mem_ctl_info *new_mci;
2951*4882a593Smuzhiyun 	struct sbridge_pvt *pvt = mci->pvt_info;
2952*4882a593Smuzhiyun 	enum hw_event_mc_err_type tp_event;
2953*4882a593Smuzhiyun 	char *optype, msg[256];
2954*4882a593Smuzhiyun 	bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
2955*4882a593Smuzhiyun 	bool overflow = GET_BITFIELD(m->status, 62, 62);
2956*4882a593Smuzhiyun 	bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
2957*4882a593Smuzhiyun 	bool recoverable;
2958*4882a593Smuzhiyun 	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
2959*4882a593Smuzhiyun 	u32 mscod = GET_BITFIELD(m->status, 16, 31);
2960*4882a593Smuzhiyun 	u32 errcode = GET_BITFIELD(m->status, 0, 15);
2961*4882a593Smuzhiyun 	u32 channel = GET_BITFIELD(m->status, 0, 3);
2962*4882a593Smuzhiyun 	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
2963*4882a593Smuzhiyun 	/*
2964*4882a593Smuzhiyun 	 * Bits 5-0 of MCi_MISC give the least significant bit that is valid.
2965*4882a593Smuzhiyun 	 * A value of 6 means a cache line aligned address; a value of 12 means
2966*4882a593Smuzhiyun 	 * a page aligned address, as reported by the patrol scrubber.
2967*4882a593Smuzhiyun 	 */
2968*4882a593Smuzhiyun 	u32 lsb = GET_BITFIELD(m->misc, 0, 5);
2969*4882a593Smuzhiyun 	long channel_mask, first_channel;
2970*4882a593Smuzhiyun 	u8  rank = 0xff, socket, ha;
2971*4882a593Smuzhiyun 	int rc, dimm;
2972*4882a593Smuzhiyun 	char *area_type = "DRAM";
2973*4882a593Smuzhiyun 
2974*4882a593Smuzhiyun 	if (pvt->info.type != SANDY_BRIDGE)
2975*4882a593Smuzhiyun 		recoverable = true;
2976*4882a593Smuzhiyun 	else
2977*4882a593Smuzhiyun 		recoverable = GET_BITFIELD(m->status, 56, 56);
2978*4882a593Smuzhiyun 
2979*4882a593Smuzhiyun 	if (uncorrected_error) {
2980*4882a593Smuzhiyun 		core_err_cnt = 1;
2981*4882a593Smuzhiyun 		if (ripv) {
2982*4882a593Smuzhiyun 			tp_event = HW_EVENT_ERR_UNCORRECTED;
2983*4882a593Smuzhiyun 		} else {
2984*4882a593Smuzhiyun 			tp_event = HW_EVENT_ERR_FATAL;
2985*4882a593Smuzhiyun 		}
2986*4882a593Smuzhiyun 	} else {
2987*4882a593Smuzhiyun 		tp_event = HW_EVENT_ERR_CORRECTED;
2988*4882a593Smuzhiyun 	}
2989*4882a593Smuzhiyun 
2990*4882a593Smuzhiyun 	/*
2991*4882a593Smuzhiyun 	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
2992*4882a593Smuzhiyun 	 * memory errors should fit in this mask:
2993*4882a593Smuzhiyun 	 *	000f 0000 1mmm cccc (binary)
2994*4882a593Smuzhiyun 	 * where:
2995*4882a593Smuzhiyun 	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
2996*4882a593Smuzhiyun 	 *	    won't be shown
2997*4882a593Smuzhiyun 	 *	mmm = error type
2998*4882a593Smuzhiyun 	 *	cccc = channel
2999*4882a593Smuzhiyun 	 * If the mask doesn't match, report an error to the parsing logic
3000*4882a593Smuzhiyun 	 */
3001*4882a593Smuzhiyun 	switch (optypenum) {
3002*4882a593Smuzhiyun 	case 0:
3003*4882a593Smuzhiyun 		optype = "generic undef request error";
3004*4882a593Smuzhiyun 		break;
3005*4882a593Smuzhiyun 	case 1:
3006*4882a593Smuzhiyun 		optype = "memory read error";
3007*4882a593Smuzhiyun 		break;
3008*4882a593Smuzhiyun 	case 2:
3009*4882a593Smuzhiyun 		optype = "memory write error";
3010*4882a593Smuzhiyun 		break;
3011*4882a593Smuzhiyun 	case 3:
3012*4882a593Smuzhiyun 		optype = "addr/cmd error";
3013*4882a593Smuzhiyun 		break;
3014*4882a593Smuzhiyun 	case 4:
3015*4882a593Smuzhiyun 		optype = "memory scrubbing error";
3016*4882a593Smuzhiyun 		break;
3017*4882a593Smuzhiyun 	default:
3018*4882a593Smuzhiyun 		optype = "reserved";
3019*4882a593Smuzhiyun 		break;
3020*4882a593Smuzhiyun 	}
3021*4882a593Smuzhiyun 
3022*4882a593Smuzhiyun 	if (pvt->info.type == KNIGHTS_LANDING) {
3023*4882a593Smuzhiyun 		if (channel == 14) {
3024*4882a593Smuzhiyun 			edac_dbg(0, "%s%s err_code:%04x:%04x EDRAM bank %d\n",
3025*4882a593Smuzhiyun 				overflow ? " OVERFLOW" : "",
3026*4882a593Smuzhiyun 				(uncorrected_error && recoverable)
3027*4882a593Smuzhiyun 				? " recoverable" : "",
3028*4882a593Smuzhiyun 				mscod, errcode,
3029*4882a593Smuzhiyun 				m->bank);
3030*4882a593Smuzhiyun 		} else {
3031*4882a593Smuzhiyun 			char A = 'A';
3032*4882a593Smuzhiyun 
3033*4882a593Smuzhiyun 			/*
3034*4882a593Smuzhiyun 			 * The reported channel is in the range 0-2, so we can't
3035*4882a593Smuzhiyun 			 * map it back to the mc. To figure out the mc we check
3036*4882a593Smuzhiyun 			 * the machine check bank register that reported this
3037*4882a593Smuzhiyun 			 * error: bank 15 means mc0 and bank 16 means mc1.
3038*4882a593Smuzhiyun 			 */
3039*4882a593Smuzhiyun 			channel = knl_channel_remap(m->bank == 16, channel);
3040*4882a593Smuzhiyun 			channel_mask = 1 << channel;
3041*4882a593Smuzhiyun 
3042*4882a593Smuzhiyun 			snprintf(msg, sizeof(msg),
3043*4882a593Smuzhiyun 				"%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
3044*4882a593Smuzhiyun 				overflow ? " OVERFLOW" : "",
3045*4882a593Smuzhiyun 				(uncorrected_error && recoverable)
3046*4882a593Smuzhiyun 				? " recoverable" : " ",
3047*4882a593Smuzhiyun 				mscod, errcode, channel, A + channel);
3048*4882a593Smuzhiyun 			edac_mc_handle_error(tp_event, mci, core_err_cnt,
3049*4882a593Smuzhiyun 				m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3050*4882a593Smuzhiyun 				channel, 0, -1,
3051*4882a593Smuzhiyun 				optype, msg);
3052*4882a593Smuzhiyun 		}
3053*4882a593Smuzhiyun 		return;
3054*4882a593Smuzhiyun 	} else if (lsb < 12) {
3055*4882a593Smuzhiyun 		rc = get_memory_error_data(mci, m->addr, &socket, &ha,
3056*4882a593Smuzhiyun 					   &channel_mask, &rank,
3057*4882a593Smuzhiyun 					   &area_type, msg);
3058*4882a593Smuzhiyun 	} else {
3059*4882a593Smuzhiyun 		rc = get_memory_error_data_from_mce(mci, m, &socket, &ha,
3060*4882a593Smuzhiyun 						    &channel_mask, msg);
3061*4882a593Smuzhiyun 	}
3062*4882a593Smuzhiyun 
3063*4882a593Smuzhiyun 	if (rc < 0)
3064*4882a593Smuzhiyun 		goto err_parsing;
3065*4882a593Smuzhiyun 	new_mci = get_mci_for_node_id(socket, ha);
3066*4882a593Smuzhiyun 	if (!new_mci) {
3067*4882a593Smuzhiyun 		strcpy(msg, "Error: socket got corrupted!");
3068*4882a593Smuzhiyun 		goto err_parsing;
3069*4882a593Smuzhiyun 	}
3070*4882a593Smuzhiyun 	mci = new_mci;
3071*4882a593Smuzhiyun 	pvt = mci->pvt_info;
3072*4882a593Smuzhiyun 
3073*4882a593Smuzhiyun 	first_channel = find_first_bit(&channel_mask, NUM_CHANNELS);
3074*4882a593Smuzhiyun 
3075*4882a593Smuzhiyun 	if (rank == 0xff)
3076*4882a593Smuzhiyun 		dimm = -1;
3077*4882a593Smuzhiyun 	else if (rank < 4)
3078*4882a593Smuzhiyun 		dimm = 0;
3079*4882a593Smuzhiyun 	else if (rank < 8)
3080*4882a593Smuzhiyun 		dimm = 1;
3081*4882a593Smuzhiyun 	else
3082*4882a593Smuzhiyun 		dimm = 2;
3083*4882a593Smuzhiyun 
3084*4882a593Smuzhiyun 	/*
3085*4882a593Smuzhiyun 	 * FIXME: On some memory configurations (mirror, lockstep), the
3086*4882a593Smuzhiyun 	 * Memory Controller can't point the error to a single DIMM. The
3087*4882a593Smuzhiyun 	 * EDAC core should be handling the channel mask, in order to point
3088*4882a593Smuzhiyun 	 * to the group of dimm's where the error may be happening.
3089*4882a593Smuzhiyun 	 */
3090*4882a593Smuzhiyun 	if (!pvt->is_lockstep && !pvt->is_cur_addr_mirrored && !pvt->is_close_pg)
3091*4882a593Smuzhiyun 		channel = first_channel;
3092*4882a593Smuzhiyun 
3093*4882a593Smuzhiyun 	snprintf(msg, sizeof(msg),
3094*4882a593Smuzhiyun 		 "%s%s area:%s err_code:%04x:%04x socket:%d ha:%d channel_mask:%ld rank:%d",
3095*4882a593Smuzhiyun 		 overflow ? " OVERFLOW" : "",
3096*4882a593Smuzhiyun 		 (uncorrected_error && recoverable) ? " recoverable" : "",
3097*4882a593Smuzhiyun 		 area_type,
3098*4882a593Smuzhiyun 		 mscod, errcode,
3099*4882a593Smuzhiyun 		 socket, ha,
3100*4882a593Smuzhiyun 		 channel_mask,
3101*4882a593Smuzhiyun 		 rank);
3102*4882a593Smuzhiyun 
3103*4882a593Smuzhiyun 	edac_dbg(0, "%s\n", msg);
3104*4882a593Smuzhiyun 
3105*4882a593Smuzhiyun 	/* FIXME: need support for channel mask */
3106*4882a593Smuzhiyun 
3107*4882a593Smuzhiyun 	if (channel == CHANNEL_UNSPECIFIED)
3108*4882a593Smuzhiyun 		channel = -1;
3109*4882a593Smuzhiyun 
3110*4882a593Smuzhiyun 	/* Call the helper to output message */
3111*4882a593Smuzhiyun 	edac_mc_handle_error(tp_event, mci, core_err_cnt,
3112*4882a593Smuzhiyun 			     m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
3113*4882a593Smuzhiyun 			     channel, dimm, -1,
3114*4882a593Smuzhiyun 			     optype, msg);
3115*4882a593Smuzhiyun 	return;
3116*4882a593Smuzhiyun err_parsing:
3117*4882a593Smuzhiyun 	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0,
3118*4882a593Smuzhiyun 			     -1, -1, -1,
3119*4882a593Smuzhiyun 			     msg, "");
3120*4882a593Smuzhiyun 
3121*4882a593Smuzhiyun }
3122*4882a593Smuzhiyun 
3123*4882a593Smuzhiyun /*
3124*4882a593Smuzhiyun  * Check that logging is enabled and that this is the right type
3125*4882a593Smuzhiyun  * of error for us to handle.
3126*4882a593Smuzhiyun  */
3127*4882a593Smuzhiyun static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
3128*4882a593Smuzhiyun 				   void *data)
3129*4882a593Smuzhiyun {
3130*4882a593Smuzhiyun 	struct mce *mce = (struct mce *)data;
3131*4882a593Smuzhiyun 	struct mem_ctl_info *mci;
3132*4882a593Smuzhiyun 	char *type;
3133*4882a593Smuzhiyun 
3134*4882a593Smuzhiyun 	if (mce->kflags & MCE_HANDLED_CEC)
3135*4882a593Smuzhiyun 		return NOTIFY_DONE;
3136*4882a593Smuzhiyun 
3137*4882a593Smuzhiyun 	/*
3138*4882a593Smuzhiyun 	 * Just let mcelog handle it if the error is
3139*4882a593Smuzhiyun 	 * outside the memory controller. A memory error
3140*4882a593Smuzhiyun 	 * is indicated by bit 7 = 1 and bits 8-11,13-15 = 0.
3141*4882a593Smuzhiyun 	 * Bit 12 has a special meaning.
3142*4882a593Smuzhiyun 	 */
3143*4882a593Smuzhiyun 	if ((mce->status & 0xefff) >> 7 != 1)
3144*4882a593Smuzhiyun 		return NOTIFY_DONE;
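	/*
	 * For example, MCACOD 0x0091 (memory read error, channel 1) passes:
	 * (0x0091 & 0xefff) >> 7 == 1. A cache-hierarchy MCACOD such as
	 * 0x0119 yields 2 instead and is left to other decoders.
	 */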
3145*4882a593Smuzhiyun 
3146*4882a593Smuzhiyun 	/* Check ADDRV bit in STATUS */
3147*4882a593Smuzhiyun 	if (!GET_BITFIELD(mce->status, 58, 58))
3148*4882a593Smuzhiyun 		return NOTIFY_DONE;
3149*4882a593Smuzhiyun 
3150*4882a593Smuzhiyun 	/* Check MISCV bit in STATUS */
3151*4882a593Smuzhiyun 	if (!GET_BITFIELD(mce->status, 59, 59))
3152*4882a593Smuzhiyun 		return NOTIFY_DONE;
3153*4882a593Smuzhiyun 
3154*4882a593Smuzhiyun 	/* Check address type in MISC (physical address only) */
3155*4882a593Smuzhiyun 	if (GET_BITFIELD(mce->misc, 6, 8) != 2)
3156*4882a593Smuzhiyun 		return NOTIFY_DONE;
3157*4882a593Smuzhiyun 
3158*4882a593Smuzhiyun 	mci = get_mci_for_node_id(mce->socketid, IMC0);
3159*4882a593Smuzhiyun 	if (!mci)
3160*4882a593Smuzhiyun 		return NOTIFY_DONE;
3161*4882a593Smuzhiyun 
3162*4882a593Smuzhiyun 	if (mce->mcgstatus & MCG_STATUS_MCIP)
3163*4882a593Smuzhiyun 		type = "Exception";
3164*4882a593Smuzhiyun 	else
3165*4882a593Smuzhiyun 		type = "Event";
3166*4882a593Smuzhiyun 
3167*4882a593Smuzhiyun 	sbridge_mc_printk(mci, KERN_DEBUG, "HANDLING MCE MEMORY ERROR\n");
3168*4882a593Smuzhiyun 
3169*4882a593Smuzhiyun 	sbridge_mc_printk(mci, KERN_DEBUG, "CPU %d: Machine Check %s: %Lx "
3170*4882a593Smuzhiyun 			  "Bank %d: %016Lx\n", mce->extcpu, type,
3171*4882a593Smuzhiyun 			  mce->mcgstatus, mce->bank, mce->status);
3172*4882a593Smuzhiyun 	sbridge_mc_printk(mci, KERN_DEBUG, "TSC %llx ", mce->tsc);
3173*4882a593Smuzhiyun 	sbridge_mc_printk(mci, KERN_DEBUG, "ADDR %llx ", mce->addr);
3174*4882a593Smuzhiyun 	sbridge_mc_printk(mci, KERN_DEBUG, "MISC %llx ", mce->misc);
3175*4882a593Smuzhiyun 
3176*4882a593Smuzhiyun 	sbridge_mc_printk(mci, KERN_DEBUG, "PROCESSOR %u:%x TIME %llu SOCKET "
3177*4882a593Smuzhiyun 			  "%u APIC %x\n", mce->cpuvendor, mce->cpuid,
3178*4882a593Smuzhiyun 			  mce->time, mce->socketid, mce->apicid);
3179*4882a593Smuzhiyun 
3180*4882a593Smuzhiyun 	sbridge_mce_output_error(mci, mce);
3181*4882a593Smuzhiyun 
3182*4882a593Smuzhiyun 	/* Advise mcelog that the error was handled */
3183*4882a593Smuzhiyun 	mce->kflags |= MCE_HANDLED_EDAC;
3184*4882a593Smuzhiyun 	return NOTIFY_OK;
3185*4882a593Smuzhiyun }
3186*4882a593Smuzhiyun 
3187*4882a593Smuzhiyun static struct notifier_block sbridge_mce_dec = {
3188*4882a593Smuzhiyun 	.notifier_call	= sbridge_mce_check_error,
3189*4882a593Smuzhiyun 	.priority	= MCE_PRIO_EDAC,
3190*4882a593Smuzhiyun };
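/*
 * A minimal sketch of how this notifier block is wired up; in the full
 * driver the (un)registration happens in the module init/exit paths,
 * which fall outside this excerpt:
 *
 *	mce_register_decode_chain(&sbridge_mce_dec);
 *	...
 *	mce_unregister_decode_chain(&sbridge_mce_dec);
 */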
3191*4882a593Smuzhiyun 
3192*4882a593Smuzhiyun /****************************************************************************
3193*4882a593Smuzhiyun 			EDAC register/unregister logic
3194*4882a593Smuzhiyun  ****************************************************************************/
3195*4882a593Smuzhiyun 
3196*4882a593Smuzhiyun static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
3197*4882a593Smuzhiyun {
3198*4882a593Smuzhiyun 	struct mem_ctl_info *mci = sbridge_dev->mci;
3199*4882a593Smuzhiyun 
3200*4882a593Smuzhiyun 	if (unlikely(!mci || !mci->pvt_info)) {
3201*4882a593Smuzhiyun 		edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
3202*4882a593Smuzhiyun 
3203*4882a593Smuzhiyun 		sbridge_printk(KERN_ERR, "Couldn't find mci handler\n");
3204*4882a593Smuzhiyun 		return;
3205*4882a593Smuzhiyun 	}
3206*4882a593Smuzhiyun 
3207*4882a593Smuzhiyun 	edac_dbg(0, "MC: mci = %p, dev = %p\n",
3208*4882a593Smuzhiyun 		 mci, &sbridge_dev->pdev[0]->dev);
3209*4882a593Smuzhiyun 
3210*4882a593Smuzhiyun 	/* Remove MC sysfs nodes */
3211*4882a593Smuzhiyun 	edac_mc_del_mc(mci->pdev);
3212*4882a593Smuzhiyun 
3213*4882a593Smuzhiyun 	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
3214*4882a593Smuzhiyun 	kfree(mci->ctl_name);
3215*4882a593Smuzhiyun 	edac_mc_free(mci);
3216*4882a593Smuzhiyun 	sbridge_dev->mci = NULL;
3217*4882a593Smuzhiyun }
3218*4882a593Smuzhiyun 
3219*4882a593Smuzhiyun static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type)
3220*4882a593Smuzhiyun {
3221*4882a593Smuzhiyun 	struct mem_ctl_info *mci;
3222*4882a593Smuzhiyun 	struct edac_mc_layer layers[2];
3223*4882a593Smuzhiyun 	struct sbridge_pvt *pvt;
3224*4882a593Smuzhiyun 	struct pci_dev *pdev = sbridge_dev->pdev[0];
3225*4882a593Smuzhiyun 	int rc;
3226*4882a593Smuzhiyun 
3227*4882a593Smuzhiyun 	/* allocate a new MC control structure */
3228*4882a593Smuzhiyun 	layers[0].type = EDAC_MC_LAYER_CHANNEL;
3229*4882a593Smuzhiyun 	layers[0].size = type == KNIGHTS_LANDING ?
3230*4882a593Smuzhiyun 		KNL_MAX_CHANNELS : NUM_CHANNELS;
3231*4882a593Smuzhiyun 	layers[0].is_virt_csrow = false;
3232*4882a593Smuzhiyun 	layers[1].type = EDAC_MC_LAYER_SLOT;
3233*4882a593Smuzhiyun 	layers[1].size = type == KNIGHTS_LANDING ? 1 : MAX_DIMMS;
3234*4882a593Smuzhiyun 	layers[1].is_virt_csrow = true;
3235*4882a593Smuzhiyun 	mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
3236*4882a593Smuzhiyun 			    sizeof(*pvt));
3237*4882a593Smuzhiyun 
3238*4882a593Smuzhiyun 	if (unlikely(!mci))
3239*4882a593Smuzhiyun 		return -ENOMEM;
3240*4882a593Smuzhiyun 
3241*4882a593Smuzhiyun 	edac_dbg(0, "MC: mci = %p, dev = %p\n",
3242*4882a593Smuzhiyun 		 mci, &pdev->dev);
3243*4882a593Smuzhiyun 
3244*4882a593Smuzhiyun 	pvt = mci->pvt_info;
3245*4882a593Smuzhiyun 	memset(pvt, 0, sizeof(*pvt));
3246*4882a593Smuzhiyun 
3247*4882a593Smuzhiyun 	/* Associate sbridge_dev and mci for future usage */
3248*4882a593Smuzhiyun 	pvt->sbridge_dev = sbridge_dev;
3249*4882a593Smuzhiyun 	sbridge_dev->mci = mci;
3250*4882a593Smuzhiyun 
3251*4882a593Smuzhiyun 	mci->mtype_cap = type == KNIGHTS_LANDING ?
3252*4882a593Smuzhiyun 		MEM_FLAG_DDR4 : MEM_FLAG_DDR3;
3253*4882a593Smuzhiyun 	mci->edac_ctl_cap = EDAC_FLAG_NONE;
3254*4882a593Smuzhiyun 	mci->edac_cap = EDAC_FLAG_NONE;
3255*4882a593Smuzhiyun 	mci->mod_name = EDAC_MOD_STR;
3256*4882a593Smuzhiyun 	mci->dev_name = pci_name(pdev);
3257*4882a593Smuzhiyun 	mci->ctl_page_to_phys = NULL;
3258*4882a593Smuzhiyun 
3259*4882a593Smuzhiyun 	pvt->info.type = type;
3260*4882a593Smuzhiyun 	switch (type) {
3261*4882a593Smuzhiyun 	case IVY_BRIDGE:
3262*4882a593Smuzhiyun 		pvt->info.rankcfgr = IB_RANK_CFG_A;
3263*4882a593Smuzhiyun 		pvt->info.get_tolm = ibridge_get_tolm;
3264*4882a593Smuzhiyun 		pvt->info.get_tohm = ibridge_get_tohm;
3265*4882a593Smuzhiyun 		pvt->info.dram_rule = ibridge_dram_rule;
3266*4882a593Smuzhiyun 		pvt->info.get_memory_type = get_memory_type;
3267*4882a593Smuzhiyun 		pvt->info.get_node_id = get_node_id;
3268*4882a593Smuzhiyun 		pvt->info.get_ha = ibridge_get_ha;
3269*4882a593Smuzhiyun 		pvt->info.rir_limit = rir_limit;
3270*4882a593Smuzhiyun 		pvt->info.sad_limit = sad_limit;
3271*4882a593Smuzhiyun 		pvt->info.interleave_mode = interleave_mode;
3272*4882a593Smuzhiyun 		pvt->info.dram_attr = dram_attr;
3273*4882a593Smuzhiyun 		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3274*4882a593Smuzhiyun 		pvt->info.interleave_list = ibridge_interleave_list;
3275*4882a593Smuzhiyun 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3276*4882a593Smuzhiyun 		pvt->info.get_width = ibridge_get_width;
3277*4882a593Smuzhiyun 
3278*4882a593Smuzhiyun 		/* Store pci devices at mci for faster access */
3279*4882a593Smuzhiyun 		rc = ibridge_mci_bind_devs(mci, sbridge_dev);
3280*4882a593Smuzhiyun 		if (unlikely(rc < 0))
3281*4882a593Smuzhiyun 			goto fail0;
3282*4882a593Smuzhiyun 		get_source_id(mci);
3283*4882a593Smuzhiyun 		mci->ctl_name = kasprintf(GFP_KERNEL, "Ivy Bridge SrcID#%d_Ha#%d",
3284*4882a593Smuzhiyun 			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3285*4882a593Smuzhiyun 		break;
3286*4882a593Smuzhiyun 	case SANDY_BRIDGE:
3287*4882a593Smuzhiyun 		pvt->info.rankcfgr = SB_RANK_CFG_A;
3288*4882a593Smuzhiyun 		pvt->info.get_tolm = sbridge_get_tolm;
3289*4882a593Smuzhiyun 		pvt->info.get_tohm = sbridge_get_tohm;
3290*4882a593Smuzhiyun 		pvt->info.dram_rule = sbridge_dram_rule;
3291*4882a593Smuzhiyun 		pvt->info.get_memory_type = get_memory_type;
3292*4882a593Smuzhiyun 		pvt->info.get_node_id = get_node_id;
3293*4882a593Smuzhiyun 		pvt->info.get_ha = sbridge_get_ha;
3294*4882a593Smuzhiyun 		pvt->info.rir_limit = rir_limit;
3295*4882a593Smuzhiyun 		pvt->info.sad_limit = sad_limit;
3296*4882a593Smuzhiyun 		pvt->info.interleave_mode = interleave_mode;
3297*4882a593Smuzhiyun 		pvt->info.dram_attr = dram_attr;
3298*4882a593Smuzhiyun 		pvt->info.max_sad = ARRAY_SIZE(sbridge_dram_rule);
3299*4882a593Smuzhiyun 		pvt->info.interleave_list = sbridge_interleave_list;
3300*4882a593Smuzhiyun 		pvt->info.interleave_pkg = sbridge_interleave_pkg;
3301*4882a593Smuzhiyun 		pvt->info.get_width = sbridge_get_width;
3302*4882a593Smuzhiyun 
3303*4882a593Smuzhiyun 		/* Store pci devices at mci for faster access */
3304*4882a593Smuzhiyun 		rc = sbridge_mci_bind_devs(mci, sbridge_dev);
3305*4882a593Smuzhiyun 		if (unlikely(rc < 0))
3306*4882a593Smuzhiyun 			goto fail0;
3307*4882a593Smuzhiyun 		get_source_id(mci);
3308*4882a593Smuzhiyun 		mci->ctl_name = kasprintf(GFP_KERNEL, "Sandy Bridge SrcID#%d_Ha#%d",
3309*4882a593Smuzhiyun 			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3310*4882a593Smuzhiyun 		break;
3311*4882a593Smuzhiyun 	case HASWELL:
3312*4882a593Smuzhiyun 		/* rankcfgr isn't used */
3313*4882a593Smuzhiyun 		pvt->info.get_tolm = haswell_get_tolm;
3314*4882a593Smuzhiyun 		pvt->info.get_tohm = haswell_get_tohm;
3315*4882a593Smuzhiyun 		pvt->info.dram_rule = ibridge_dram_rule;
3316*4882a593Smuzhiyun 		pvt->info.get_memory_type = haswell_get_memory_type;
3317*4882a593Smuzhiyun 		pvt->info.get_node_id = haswell_get_node_id;
3318*4882a593Smuzhiyun 		pvt->info.get_ha = ibridge_get_ha;
3319*4882a593Smuzhiyun 		pvt->info.rir_limit = haswell_rir_limit;
3320*4882a593Smuzhiyun 		pvt->info.sad_limit = sad_limit;
3321*4882a593Smuzhiyun 		pvt->info.interleave_mode = interleave_mode;
3322*4882a593Smuzhiyun 		pvt->info.dram_attr = dram_attr;
3323*4882a593Smuzhiyun 		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3324*4882a593Smuzhiyun 		pvt->info.interleave_list = ibridge_interleave_list;
3325*4882a593Smuzhiyun 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3326*4882a593Smuzhiyun 		pvt->info.get_width = ibridge_get_width;
3327*4882a593Smuzhiyun 
3328*4882a593Smuzhiyun 		/* Store pci devices at mci for faster access */
3329*4882a593Smuzhiyun 		rc = haswell_mci_bind_devs(mci, sbridge_dev);
3330*4882a593Smuzhiyun 		if (unlikely(rc < 0))
3331*4882a593Smuzhiyun 			goto fail0;
3332*4882a593Smuzhiyun 		get_source_id(mci);
3333*4882a593Smuzhiyun 		mci->ctl_name = kasprintf(GFP_KERNEL, "Haswell SrcID#%d_Ha#%d",
3334*4882a593Smuzhiyun 			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3335*4882a593Smuzhiyun 		break;
3336*4882a593Smuzhiyun 	case BROADWELL:
3337*4882a593Smuzhiyun 		/* rankcfgr isn't used */
3338*4882a593Smuzhiyun 		pvt->info.get_tolm = haswell_get_tolm;
3339*4882a593Smuzhiyun 		pvt->info.get_tohm = haswell_get_tohm;
3340*4882a593Smuzhiyun 		pvt->info.dram_rule = ibridge_dram_rule;
3341*4882a593Smuzhiyun 		pvt->info.get_memory_type = haswell_get_memory_type;
3342*4882a593Smuzhiyun 		pvt->info.get_node_id = haswell_get_node_id;
3343*4882a593Smuzhiyun 		pvt->info.get_ha = ibridge_get_ha;
3344*4882a593Smuzhiyun 		pvt->info.rir_limit = haswell_rir_limit;
3345*4882a593Smuzhiyun 		pvt->info.sad_limit = sad_limit;
3346*4882a593Smuzhiyun 		pvt->info.interleave_mode = interleave_mode;
3347*4882a593Smuzhiyun 		pvt->info.dram_attr = dram_attr;
3348*4882a593Smuzhiyun 		pvt->info.max_sad = ARRAY_SIZE(ibridge_dram_rule);
3349*4882a593Smuzhiyun 		pvt->info.interleave_list = ibridge_interleave_list;
3350*4882a593Smuzhiyun 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3351*4882a593Smuzhiyun 		pvt->info.get_width = broadwell_get_width;
3352*4882a593Smuzhiyun 
3353*4882a593Smuzhiyun 		/* Store pci devices at mci for faster access */
3354*4882a593Smuzhiyun 		rc = broadwell_mci_bind_devs(mci, sbridge_dev);
3355*4882a593Smuzhiyun 		if (unlikely(rc < 0))
3356*4882a593Smuzhiyun 			goto fail0;
3357*4882a593Smuzhiyun 		get_source_id(mci);
3358*4882a593Smuzhiyun 		mci->ctl_name = kasprintf(GFP_KERNEL, "Broadwell SrcID#%d_Ha#%d",
3359*4882a593Smuzhiyun 			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3360*4882a593Smuzhiyun 		break;
3361*4882a593Smuzhiyun 	case KNIGHTS_LANDING:
3362*4882a593Smuzhiyun 		/* pvt->info.rankcfgr == ??? */
3363*4882a593Smuzhiyun 		pvt->info.get_tolm = knl_get_tolm;
3364*4882a593Smuzhiyun 		pvt->info.get_tohm = knl_get_tohm;
3365*4882a593Smuzhiyun 		pvt->info.dram_rule = knl_dram_rule;
3366*4882a593Smuzhiyun 		pvt->info.get_memory_type = knl_get_memory_type;
3367*4882a593Smuzhiyun 		pvt->info.get_node_id = knl_get_node_id;
3368*4882a593Smuzhiyun 		pvt->info.get_ha = knl_get_ha;
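		/* No RIR limit hook: KNL address decode does not use the generic RIR stage. */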
3369*4882a593Smuzhiyun 		pvt->info.rir_limit = NULL;
3370*4882a593Smuzhiyun 		pvt->info.sad_limit = knl_sad_limit;
3371*4882a593Smuzhiyun 		pvt->info.interleave_mode = knl_interleave_mode;
3372*4882a593Smuzhiyun 		pvt->info.dram_attr = dram_attr_knl;
3373*4882a593Smuzhiyun 		pvt->info.max_sad = ARRAY_SIZE(knl_dram_rule);
3374*4882a593Smuzhiyun 		pvt->info.interleave_list = knl_interleave_list;
3375*4882a593Smuzhiyun 		pvt->info.interleave_pkg = ibridge_interleave_pkg;
3376*4882a593Smuzhiyun 		pvt->info.get_width = knl_get_width;
3377*4882a593Smuzhiyun 
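		/* Cache the PCI devices in mci for faster access */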
3378*4882a593Smuzhiyun 		rc = knl_mci_bind_devs(mci, sbridge_dev);
3379*4882a593Smuzhiyun 		if (unlikely(rc < 0))
3380*4882a593Smuzhiyun 			goto fail0;
3381*4882a593Smuzhiyun 		get_source_id(mci);
3382*4882a593Smuzhiyun 		mci->ctl_name = kasprintf(GFP_KERNEL, "Knights Landing SrcID#%d_Ha#%d",
3383*4882a593Smuzhiyun 			pvt->sbridge_dev->source_id, pvt->sbridge_dev->dom);
3384*4882a593Smuzhiyun 		break;
3385*4882a593Smuzhiyun 	}
3386*4882a593Smuzhiyun 
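	/* ctl_name is still NULL here if the kasprintf() above failed. */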
3387*4882a593Smuzhiyun 	if (!mci->ctl_name) {
3388*4882a593Smuzhiyun 		rc = -ENOMEM;
3389*4882a593Smuzhiyun 		goto fail0;
3390*4882a593Smuzhiyun 	}
3391*4882a593Smuzhiyun 
3392*4882a593Smuzhiyun 	/* Get dimm basic config and the memory layout */
3393*4882a593Smuzhiyun 	rc = get_dimm_config(mci);
3394*4882a593Smuzhiyun 	if (rc < 0) {
3395*4882a593Smuzhiyun 		edac_dbg(0, "MC: failed to get_dimm_config()\n");
3396*4882a593Smuzhiyun 		goto fail;
3397*4882a593Smuzhiyun 	}
3398*4882a593Smuzhiyun 	get_memory_layout(mci);
3399*4882a593Smuzhiyun 
3400*4882a593Smuzhiyun 	/* record ptr to the generic device */
3401*4882a593Smuzhiyun 	mci->pdev = &pdev->dev;
3402*4882a593Smuzhiyun 
3403*4882a593Smuzhiyun 	/* add this new MC control structure to EDAC's list of MCs */
3404*4882a593Smuzhiyun 	if (unlikely(edac_mc_add_mc(mci))) {
3405*4882a593Smuzhiyun 		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
3406*4882a593Smuzhiyun 		rc = -EINVAL;
3407*4882a593Smuzhiyun 		goto fail;
3408*4882a593Smuzhiyun 	}
3409*4882a593Smuzhiyun 
3410*4882a593Smuzhiyun 	return 0;
3411*4882a593Smuzhiyun 
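	/*
	 * Two-level unwind: "fail" also frees ctl_name, while "fail0"
	 * only releases the mci and clears the back-pointer.
	 */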
3412*4882a593Smuzhiyun fail:
3413*4882a593Smuzhiyun 	kfree(mci->ctl_name);
3414*4882a593Smuzhiyun fail0:
3415*4882a593Smuzhiyun 	edac_mc_free(mci);
3416*4882a593Smuzhiyun 	sbridge_dev->mci = NULL;
3417*4882a593Smuzhiyun 	return rc;
3418*4882a593Smuzhiyun }
3419*4882a593Smuzhiyun 
3420*4882a593Smuzhiyun static const struct x86_cpu_id sbridge_cpuids[] = {
3421*4882a593Smuzhiyun 	X86_MATCH_INTEL_FAM6_MODEL(SANDYBRIDGE_X, &pci_dev_descr_sbridge_table),
3422*4882a593Smuzhiyun 	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X,	  &pci_dev_descr_ibridge_table),
3423*4882a593Smuzhiyun 	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X,	  &pci_dev_descr_haswell_table),
3424*4882a593Smuzhiyun 	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X,	  &pci_dev_descr_broadwell_table),
3425*4882a593Smuzhiyun 	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D,	  &pci_dev_descr_broadwell_table),
3426*4882a593Smuzhiyun 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL,  &pci_dev_descr_knl_table),
3427*4882a593Smuzhiyun 	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM,  &pci_dev_descr_knl_table),
3428*4882a593Smuzhiyun 	{ }
3429*4882a593Smuzhiyun };
3430*4882a593Smuzhiyun MODULE_DEVICE_TABLE(x86cpu, sbridge_cpuids);
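/* Allow modprobe to autoload this driver when a listed CPU model is present. */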
3431*4882a593Smuzhiyun 
3432*4882a593Smuzhiyun /*
3433*4882a593Smuzhiyun  *	sbridge_probe	Get all devices and register the memory
3434*4882a593Smuzhiyun  *			controllers present.
3435*4882a593Smuzhiyun  *	return:
3436*4882a593Smuzhiyun  *		0 if a device was found
3437*4882a593Smuzhiyun  *		< 0 on error
3438*4882a593Smuzhiyun  */
3439*4882a593Smuzhiyun 
3440*4882a593Smuzhiyun static int sbridge_probe(const struct x86_cpu_id *id)
3441*4882a593Smuzhiyun {
3442*4882a593Smuzhiyun 	int rc = -ENODEV;
3443*4882a593Smuzhiyun 	u8 mc, num_mc = 0;
3444*4882a593Smuzhiyun 	struct sbridge_dev *sbridge_dev;
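	/* driver_data points at the pci_id_table chosen in sbridge_cpuids[]. */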
3445*4882a593Smuzhiyun 	struct pci_id_table *ptable = (struct pci_id_table *)id->driver_data;
3446*4882a593Smuzhiyun 
3447*4882a593Smuzhiyun 	/* get the pci devices we want to reserve for our use */
3448*4882a593Smuzhiyun 	rc = sbridge_get_all_devices(&num_mc, ptable);
3449*4882a593Smuzhiyun 
3450*4882a593Smuzhiyun 	if (unlikely(rc < 0)) {
3451*4882a593Smuzhiyun 		edac_dbg(0, "couldn't get all devices\n");
3452*4882a593Smuzhiyun 		goto fail0;
3453*4882a593Smuzhiyun 	}
3454*4882a593Smuzhiyun 
3455*4882a593Smuzhiyun 	mc = 0;
3456*4882a593Smuzhiyun 
3457*4882a593Smuzhiyun 	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list) {
3458*4882a593Smuzhiyun 		edac_dbg(0, "Registering MC#%d (%d of %d)\n",
3459*4882a593Smuzhiyun 			 mc, mc + 1, num_mc);
3460*4882a593Smuzhiyun 
3461*4882a593Smuzhiyun 		sbridge_dev->mc = mc++;
3462*4882a593Smuzhiyun 		rc = sbridge_register_mci(sbridge_dev, ptable->type);
3463*4882a593Smuzhiyun 		if (unlikely(rc < 0))
3464*4882a593Smuzhiyun 			goto fail1;
3465*4882a593Smuzhiyun 	}
3466*4882a593Smuzhiyun 
3467*4882a593Smuzhiyun 	sbridge_printk(KERN_INFO, "%s\n", SBRIDGE_REVISION);
3468*4882a593Smuzhiyun 
3469*4882a593Smuzhiyun 	return 0;
3470*4882a593Smuzhiyun 
3471*4882a593Smuzhiyun fail1:
3472*4882a593Smuzhiyun 	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3473*4882a593Smuzhiyun 		sbridge_unregister_mci(sbridge_dev);
3474*4882a593Smuzhiyun 
3475*4882a593Smuzhiyun 	sbridge_put_all_devices();
3476*4882a593Smuzhiyun fail0:
3477*4882a593Smuzhiyun 	return rc;
3478*4882a593Smuzhiyun }
3479*4882a593Smuzhiyun 
3480*4882a593Smuzhiyun /*
3481*4882a593Smuzhiyun  *	sbridge_remove	Unregister all memory controllers and release
3482*4882a593Smuzhiyun  *			the reserved PCI devices.
3483*4882a593Smuzhiyun  */
3484*4882a593Smuzhiyun static void sbridge_remove(void)
3485*4882a593Smuzhiyun {
3486*4882a593Smuzhiyun 	struct sbridge_dev *sbridge_dev;
3487*4882a593Smuzhiyun 
3488*4882a593Smuzhiyun 	edac_dbg(0, "\n");
3489*4882a593Smuzhiyun 
3490*4882a593Smuzhiyun 	list_for_each_entry(sbridge_dev, &sbridge_edac_list, list)
3491*4882a593Smuzhiyun 		sbridge_unregister_mci(sbridge_dev);
3492*4882a593Smuzhiyun 
3493*4882a593Smuzhiyun 	/* Release PCI resources */
3494*4882a593Smuzhiyun 	sbridge_put_all_devices();
3495*4882a593Smuzhiyun }
3496*4882a593Smuzhiyun 
3497*4882a593Smuzhiyun /*
3498*4882a593Smuzhiyun  *	sbridge_init		Module entry function
3499*4882a593Smuzhiyun  *			Probe for supported devices and initialize the driver.
3500*4882a593Smuzhiyun  */
3501*4882a593Smuzhiyun static int __init sbridge_init(void)
3502*4882a593Smuzhiyun {
3503*4882a593Smuzhiyun 	const struct x86_cpu_id *id;
3504*4882a593Smuzhiyun 	const char *owner;
3505*4882a593Smuzhiyun 	int rc;
3506*4882a593Smuzhiyun 
3507*4882a593Smuzhiyun 	edac_dbg(2, "\n");
3508*4882a593Smuzhiyun 
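	/* Only one EDAC MC driver may be active; bail out if another already owns it. */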
3509*4882a593Smuzhiyun 	owner = edac_get_owner();
3510*4882a593Smuzhiyun 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
3511*4882a593Smuzhiyun 		return -EBUSY;
3512*4882a593Smuzhiyun 
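	/* The memory controller's PCI config space is typically not exposed to guests. */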
3513*4882a593Smuzhiyun 	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
3514*4882a593Smuzhiyun 		return -ENODEV;
3515*4882a593Smuzhiyun 
3516*4882a593Smuzhiyun 	id = x86_match_cpu(sbridge_cpuids);
3517*4882a593Smuzhiyun 	if (!id)
3518*4882a593Smuzhiyun 		return -ENODEV;
3519*4882a593Smuzhiyun 
3520*4882a593Smuzhiyun 	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
3521*4882a593Smuzhiyun 	opstate_init();
3522*4882a593Smuzhiyun 
3523*4882a593Smuzhiyun 	rc = sbridge_probe(id);
3524*4882a593Smuzhiyun 
3525*4882a593Smuzhiyun 	if (rc >= 0) {
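		/* From here on, decode machine-check events into EDAC error reports. */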
3526*4882a593Smuzhiyun 		mce_register_decode_chain(&sbridge_mce_dec);
3527*4882a593Smuzhiyun 		return 0;
3528*4882a593Smuzhiyun 	}
3529*4882a593Smuzhiyun 
3530*4882a593Smuzhiyun 	sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
3531*4882a593Smuzhiyun 		      rc);
3532*4882a593Smuzhiyun 
3533*4882a593Smuzhiyun 	return rc;
3534*4882a593Smuzhiyun }
3535*4882a593Smuzhiyun 
3536*4882a593Smuzhiyun /*
3537*4882a593Smuzhiyun  *	sbridge_exit()	Module exit function
3538*4882a593Smuzhiyun  *			Unregister the driver
3539*4882a593Smuzhiyun  */
3540*4882a593Smuzhiyun static void __exit sbridge_exit(void)
3541*4882a593Smuzhiyun {
3542*4882a593Smuzhiyun 	edac_dbg(2, "\n");
3543*4882a593Smuzhiyun 	sbridge_remove();
3544*4882a593Smuzhiyun 	mce_unregister_decode_chain(&sbridge_mce_dec);
3545*4882a593Smuzhiyun }
3546*4882a593Smuzhiyun 
3547*4882a593Smuzhiyun module_init(sbridge_init);
3548*4882a593Smuzhiyun module_exit(sbridge_exit);
3549*4882a593Smuzhiyun 
3550*4882a593Smuzhiyun module_param(edac_op_state, int, 0444);
3551*4882a593Smuzhiyun MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
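/* Example: "modprobe sb_edac edac_op_state=1" selects NMI error reporting. */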
3552*4882a593Smuzhiyun 
3553*4882a593Smuzhiyun MODULE_LICENSE("GPL");
3554*4882a593Smuzhiyun MODULE_AUTHOR("Mauro Carvalho Chehab");
3555*4882a593Smuzhiyun MODULE_AUTHOR("Red Hat Inc. (https://www.redhat.com)");
3556*4882a593Smuzhiyun MODULE_DESCRIPTION("MC Driver for Intel Sandy Bridge and Ivy Bridge memory controllers - "
3557*4882a593Smuzhiyun 		   SBRIDGE_REVISION);