xref: /OK3568_Linux_fs/kernel/drivers/edac/pnd2_edac.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Driver for Pondicherry2 memory controller.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2016, Intel Corporation.
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * [Derived from sb_edac.c]
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * Translation of system physical addresses to DIMM addresses
10*4882a593Smuzhiyun  * is a two stage process:
11*4882a593Smuzhiyun  *
12*4882a593Smuzhiyun  * First the Pondicherry 2 memory controller handles slice and channel interleaving
13*4882a593Smuzhiyun  * in "sys2pmi()". This is (almost) completely common between platforms.
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  * Then a platform specific dunit (DIMM unit) completes the process to provide DIMM,
16*4882a593Smuzhiyun  * rank, bank, row and column using the appropriate "dunit_ops" functions/parameters.
17*4882a593Smuzhiyun  */
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include <linux/module.h>
20*4882a593Smuzhiyun #include <linux/init.h>
21*4882a593Smuzhiyun #include <linux/pci.h>
22*4882a593Smuzhiyun #include <linux/pci_ids.h>
23*4882a593Smuzhiyun #include <linux/slab.h>
24*4882a593Smuzhiyun #include <linux/delay.h>
25*4882a593Smuzhiyun #include <linux/edac.h>
26*4882a593Smuzhiyun #include <linux/mmzone.h>
27*4882a593Smuzhiyun #include <linux/smp.h>
28*4882a593Smuzhiyun #include <linux/bitmap.h>
29*4882a593Smuzhiyun #include <linux/math64.h>
30*4882a593Smuzhiyun #include <linux/mod_devicetable.h>
31*4882a593Smuzhiyun #include <asm/cpu_device_id.h>
32*4882a593Smuzhiyun #include <asm/intel-family.h>
33*4882a593Smuzhiyun #include <asm/processor.h>
34*4882a593Smuzhiyun #include <asm/mce.h>
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun #include "edac_mc.h"
37*4882a593Smuzhiyun #include "edac_module.h"
38*4882a593Smuzhiyun #include "pnd2_edac.h"
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #define EDAC_MOD_STR		"pnd2_edac"
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun #define APL_NUM_CHANNELS	4
43*4882a593Smuzhiyun #define DNV_NUM_CHANNELS	2
44*4882a593Smuzhiyun #define DNV_MAX_DIMMS		2 /* Max DIMMs per channel */
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun enum type {
47*4882a593Smuzhiyun 	APL,
48*4882a593Smuzhiyun 	DNV, /* All requests go to PMI CH0 on each slice (CH1 disabled) */
49*4882a593Smuzhiyun };
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun struct dram_addr {
52*4882a593Smuzhiyun 	int chan;
53*4882a593Smuzhiyun 	int dimm;
54*4882a593Smuzhiyun 	int rank;
55*4882a593Smuzhiyun 	int bank;
56*4882a593Smuzhiyun 	int row;
57*4882a593Smuzhiyun 	int col;
58*4882a593Smuzhiyun };
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun struct pnd2_pvt {
61*4882a593Smuzhiyun 	int dimm_geom[APL_NUM_CHANNELS];
62*4882a593Smuzhiyun 	u64 tolm, tohm;
63*4882a593Smuzhiyun };
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun /*
66*4882a593Smuzhiyun  * System address space is divided into multiple regions with
67*4882a593Smuzhiyun  * different interleave rules in each. The as0/as1 regions
68*4882a593Smuzhiyun  * have no interleaving at all. The as2 region is interleaved
69*4882a593Smuzhiyun  * between two channels. The mot region is magic and may overlap
70*4882a593Smuzhiyun  * other regions, with its interleave rules taking precedence.
71*4882a593Smuzhiyun  * Addresses not in any of these regions are interleaved across
72*4882a593Smuzhiyun  * all four channels.
73*4882a593Smuzhiyun  */
74*4882a593Smuzhiyun static struct region {
75*4882a593Smuzhiyun 	u64	base;
76*4882a593Smuzhiyun 	u64	limit;
77*4882a593Smuzhiyun 	u8	enabled;
78*4882a593Smuzhiyun } mot, as0, as1, as2;
79*4882a593Smuzhiyun 
80*4882a593Smuzhiyun static struct dunit_ops {
81*4882a593Smuzhiyun 	char *name;
82*4882a593Smuzhiyun 	enum type type;
83*4882a593Smuzhiyun 	int pmiaddr_shift;
84*4882a593Smuzhiyun 	int pmiidx_shift;
85*4882a593Smuzhiyun 	int channels;
86*4882a593Smuzhiyun 	int dimms_per_channel;
87*4882a593Smuzhiyun 	int (*rd_reg)(int port, int off, int op, void *data, size_t sz, char *name);
88*4882a593Smuzhiyun 	int (*get_registers)(void);
89*4882a593Smuzhiyun 	int (*check_ecc)(void);
90*4882a593Smuzhiyun 	void (*mk_region)(char *name, struct region *rp, void *asym);
91*4882a593Smuzhiyun 	void (*get_dimm_config)(struct mem_ctl_info *mci);
92*4882a593Smuzhiyun 	int (*pmi2mem)(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
93*4882a593Smuzhiyun 				   struct dram_addr *daddr, char *msg);
94*4882a593Smuzhiyun } *ops;
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun static struct mem_ctl_info *pnd2_mci;
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun #define PND2_MSG_SIZE	256
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun /* Debug macros */
101*4882a593Smuzhiyun #define pnd2_printk(level, fmt, arg...)			\
102*4882a593Smuzhiyun 	edac_printk(level, "pnd2", fmt, ##arg)
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun #define pnd2_mc_printk(mci, level, fmt, arg...)	\
105*4882a593Smuzhiyun 	edac_mc_chipset_printk(mci, level, "pnd2", fmt, ##arg)
106*4882a593Smuzhiyun 
107*4882a593Smuzhiyun #define MOT_CHAN_INTLV_BIT_1SLC_2CH 12
108*4882a593Smuzhiyun #define MOT_CHAN_INTLV_BIT_2SLC_2CH 13
109*4882a593Smuzhiyun #define SELECTOR_DISABLED (-1)
110*4882a593Smuzhiyun #define _4GB (1ul << 32)
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun #define PMI_ADDRESS_WIDTH	31
113*4882a593Smuzhiyun #define PND_MAX_PHYS_BIT	39
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun #define APL_ASYMSHIFT		28
116*4882a593Smuzhiyun #define DNV_ASYMSHIFT		31
117*4882a593Smuzhiyun #define CH_HASH_MASK_LSB	6
118*4882a593Smuzhiyun #define SLICE_HASH_MASK_LSB	6
119*4882a593Smuzhiyun #define MOT_SLC_INTLV_BIT	12
120*4882a593Smuzhiyun #define LOG2_PMI_ADDR_GRANULARITY	5
121*4882a593Smuzhiyun #define MOT_SHIFT	24
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun #define GET_BITFIELD(v, lo, hi)	(((v) & GENMASK_ULL(hi, lo)) >> (lo))
124*4882a593Smuzhiyun #define U64_LSHIFT(val, s)	((u64)(val) << (s))
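/*
 * Illustrative examples (not part of the original source):
 *   GET_BITFIELD(0xabcd, 4, 7) == 0xc            - bits 7..4 of 0xabcd
 *   U64_LSHIFT(0x3, 32)        == 0x300000000ull - shift done in 64-bit math
 * U64_LSHIFT() exists so that values read from 32-bit registers can be
 * shifted past bit 31 without overflow.
 */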
125*4882a593Smuzhiyun 
126*4882a593Smuzhiyun /*
127*4882a593Smuzhiyun  * On Apollo Lake we access memory controller registers via a
128*4882a593Smuzhiyun  * side-band mailbox style interface in a hidden PCI device
129*4882a593Smuzhiyun  * configuration space.
130*4882a593Smuzhiyun  */
131*4882a593Smuzhiyun static struct pci_bus	*p2sb_bus;
132*4882a593Smuzhiyun #define P2SB_DEVFN	PCI_DEVFN(0xd, 0)
133*4882a593Smuzhiyun #define P2SB_ADDR_OFF	0xd0
134*4882a593Smuzhiyun #define P2SB_DATA_OFF	0xd4
135*4882a593Smuzhiyun #define P2SB_STAT_OFF	0xd8
136*4882a593Smuzhiyun #define P2SB_ROUT_OFF	0xda
137*4882a593Smuzhiyun #define P2SB_EADD_OFF	0xdc
138*4882a593Smuzhiyun #define P2SB_HIDE_OFF	0xe1
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun #define P2SB_BUSY	1
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun #define P2SB_READ(size, off, ptr) \
143*4882a593Smuzhiyun 	pci_bus_read_config_##size(p2sb_bus, P2SB_DEVFN, off, ptr)
144*4882a593Smuzhiyun #define P2SB_WRITE(size, off, val) \
145*4882a593Smuzhiyun 	pci_bus_write_config_##size(p2sb_bus, P2SB_DEVFN, off, val)
146*4882a593Smuzhiyun 
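/*
 * Descriptive note: _apl_rd_reg() below drives the P2SB mailbox as follows -
 * unhide the P2SB device if BIOS hid it, check that the mailbox is idle,
 * program ADDR with (port << 24) | offset, clear DATA/EADD/ROUT, write the
 * opcode plus P2SB_BUSY to STAT, poll until BUSY clears, read the result
 * from DATA, then restore the original hidden state.
 */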
147*4882a593Smuzhiyun static bool p2sb_is_busy(u16 *status)
148*4882a593Smuzhiyun {
149*4882a593Smuzhiyun 	P2SB_READ(word, P2SB_STAT_OFF, status);
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 	return !!(*status & P2SB_BUSY);
152*4882a593Smuzhiyun }
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun static int _apl_rd_reg(int port, int off, int op, u32 *data)
155*4882a593Smuzhiyun {
156*4882a593Smuzhiyun 	int retries = 0xff, ret;
157*4882a593Smuzhiyun 	u16 status;
158*4882a593Smuzhiyun 	u8 hidden;
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 	/* Unhide the P2SB device, if it's hidden */
161*4882a593Smuzhiyun 	P2SB_READ(byte, P2SB_HIDE_OFF, &hidden);
162*4882a593Smuzhiyun 	if (hidden)
163*4882a593Smuzhiyun 		P2SB_WRITE(byte, P2SB_HIDE_OFF, 0);
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	if (p2sb_is_busy(&status)) {
166*4882a593Smuzhiyun 		ret = -EAGAIN;
167*4882a593Smuzhiyun 		goto out;
168*4882a593Smuzhiyun 	}
169*4882a593Smuzhiyun 
170*4882a593Smuzhiyun 	P2SB_WRITE(dword, P2SB_ADDR_OFF, (port << 24) | off);
171*4882a593Smuzhiyun 	P2SB_WRITE(dword, P2SB_DATA_OFF, 0);
172*4882a593Smuzhiyun 	P2SB_WRITE(dword, P2SB_EADD_OFF, 0);
173*4882a593Smuzhiyun 	P2SB_WRITE(word, P2SB_ROUT_OFF, 0);
174*4882a593Smuzhiyun 	P2SB_WRITE(word, P2SB_STAT_OFF, (op << 8) | P2SB_BUSY);
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun 	while (p2sb_is_busy(&status)) {
177*4882a593Smuzhiyun 		if (retries-- == 0) {
178*4882a593Smuzhiyun 			ret = -EBUSY;
179*4882a593Smuzhiyun 			goto out;
180*4882a593Smuzhiyun 		}
181*4882a593Smuzhiyun 	}
182*4882a593Smuzhiyun 
183*4882a593Smuzhiyun 	P2SB_READ(dword, P2SB_DATA_OFF, data);
184*4882a593Smuzhiyun 	ret = (status >> 1) & 0x3;
185*4882a593Smuzhiyun out:
186*4882a593Smuzhiyun 	/* Hide the P2SB device, if it was hidden before */
187*4882a593Smuzhiyun 	if (hidden)
188*4882a593Smuzhiyun 		P2SB_WRITE(byte, P2SB_HIDE_OFF, hidden);
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun 	return ret;
191*4882a593Smuzhiyun }
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun static int apl_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
194*4882a593Smuzhiyun {
195*4882a593Smuzhiyun 	int ret = 0;
196*4882a593Smuzhiyun 
197*4882a593Smuzhiyun 	edac_dbg(2, "Read %s port=%x off=%x op=%x\n", name, port, off, op);
198*4882a593Smuzhiyun 	switch (sz) {
199*4882a593Smuzhiyun 	case 8:
200*4882a593Smuzhiyun 		ret = _apl_rd_reg(port, off + 4, op, (u32 *)(data + 4));
201*4882a593Smuzhiyun 		fallthrough;
202*4882a593Smuzhiyun 	case 4:
203*4882a593Smuzhiyun 		ret |= _apl_rd_reg(port, off, op, (u32 *)data);
204*4882a593Smuzhiyun 		pnd2_printk(KERN_DEBUG, "%s=%x%08x ret=%d\n", name,
205*4882a593Smuzhiyun 					sz == 8 ? *((u32 *)(data + 4)) : 0, *((u32 *)data), ret);
206*4882a593Smuzhiyun 		break;
207*4882a593Smuzhiyun 	}
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	return ret;
210*4882a593Smuzhiyun }
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun static u64 get_mem_ctrl_hub_base_addr(void)
213*4882a593Smuzhiyun {
214*4882a593Smuzhiyun 	struct b_cr_mchbar_lo_pci lo;
215*4882a593Smuzhiyun 	struct b_cr_mchbar_hi_pci hi;
216*4882a593Smuzhiyun 	struct pci_dev *pdev;
217*4882a593Smuzhiyun 
218*4882a593Smuzhiyun 	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
219*4882a593Smuzhiyun 	if (pdev) {
220*4882a593Smuzhiyun 		pci_read_config_dword(pdev, 0x48, (u32 *)&lo);
221*4882a593Smuzhiyun 		pci_read_config_dword(pdev, 0x4c, (u32 *)&hi);
222*4882a593Smuzhiyun 		pci_dev_put(pdev);
223*4882a593Smuzhiyun 	} else {
224*4882a593Smuzhiyun 		return 0;
225*4882a593Smuzhiyun 	}
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 	if (!lo.enable) {
228*4882a593Smuzhiyun 		edac_dbg(2, "MMIO via memory controller hub base address is disabled!\n");
229*4882a593Smuzhiyun 		return 0;
230*4882a593Smuzhiyun 	}
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	return U64_LSHIFT(hi.base, 32) | U64_LSHIFT(lo.base, 15);
233*4882a593Smuzhiyun }
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun static u64 get_sideband_reg_base_addr(void)
236*4882a593Smuzhiyun {
237*4882a593Smuzhiyun 	struct pci_dev *pdev;
238*4882a593Smuzhiyun 	u32 hi, lo;
239*4882a593Smuzhiyun 	u8 hidden;
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x19dd, NULL);
242*4882a593Smuzhiyun 	if (pdev) {
243*4882a593Smuzhiyun 		/* Unhide the P2SB device, if it's hidden */
244*4882a593Smuzhiyun 		pci_read_config_byte(pdev, 0xe1, &hidden);
245*4882a593Smuzhiyun 		if (hidden)
246*4882a593Smuzhiyun 			pci_write_config_byte(pdev, 0xe1, 0);
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 		pci_read_config_dword(pdev, 0x10, &lo);
249*4882a593Smuzhiyun 		pci_read_config_dword(pdev, 0x14, &hi);
250*4882a593Smuzhiyun 		lo &= 0xfffffff0;
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 		/* Hide the P2SB device, if it was hidden before */
253*4882a593Smuzhiyun 		if (hidden)
254*4882a593Smuzhiyun 			pci_write_config_byte(pdev, 0xe1, hidden);
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 		pci_dev_put(pdev);
257*4882a593Smuzhiyun 		return (U64_LSHIFT(hi, 32) | U64_LSHIFT(lo, 0));
258*4882a593Smuzhiyun 	} else {
259*4882a593Smuzhiyun 		return 0xfd000000;
260*4882a593Smuzhiyun 	}
261*4882a593Smuzhiyun }
262*4882a593Smuzhiyun 
263*4882a593Smuzhiyun #define DNV_MCHBAR_SIZE  0x8000
264*4882a593Smuzhiyun #define DNV_SB_PORT_SIZE 0x10000
265*4882a593Smuzhiyun static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun 	struct pci_dev *pdev;
268*4882a593Smuzhiyun 	char *base;
269*4882a593Smuzhiyun 	u64 addr;
270*4882a593Smuzhiyun 	unsigned long size;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	if (op == 4) {
273*4882a593Smuzhiyun 		pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
274*4882a593Smuzhiyun 		if (!pdev)
275*4882a593Smuzhiyun 			return -ENODEV;
276*4882a593Smuzhiyun 
277*4882a593Smuzhiyun 		pci_read_config_dword(pdev, off, data);
278*4882a593Smuzhiyun 		pci_dev_put(pdev);
279*4882a593Smuzhiyun 	} else {
280*4882a593Smuzhiyun 		/* MMIO via memory controller hub base address */
281*4882a593Smuzhiyun 		if (op == 0 && port == 0x4c) {
282*4882a593Smuzhiyun 			addr = get_mem_ctrl_hub_base_addr();
283*4882a593Smuzhiyun 			if (!addr)
284*4882a593Smuzhiyun 				return -ENODEV;
285*4882a593Smuzhiyun 			size = DNV_MCHBAR_SIZE;
286*4882a593Smuzhiyun 		} else {
287*4882a593Smuzhiyun 			/* MMIO via sideband register base address */
288*4882a593Smuzhiyun 			addr = get_sideband_reg_base_addr();
289*4882a593Smuzhiyun 			if (!addr)
290*4882a593Smuzhiyun 				return -ENODEV;
291*4882a593Smuzhiyun 			addr += (port << 16);
292*4882a593Smuzhiyun 			size = DNV_SB_PORT_SIZE;
293*4882a593Smuzhiyun 		}
294*4882a593Smuzhiyun 
295*4882a593Smuzhiyun 		base = ioremap((resource_size_t)addr, size);
296*4882a593Smuzhiyun 		if (!base)
297*4882a593Smuzhiyun 			return -ENODEV;
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun 		if (sz == 8)
300*4882a593Smuzhiyun 			*(u32 *)(data + 4) = *(u32 *)(base + off + 4);
301*4882a593Smuzhiyun 		*(u32 *)data = *(u32 *)(base + off);
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 		iounmap(base);
304*4882a593Smuzhiyun 	}
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	edac_dbg(2, "Read %s=%.8x_%.8x\n", name,
307*4882a593Smuzhiyun 			(sz == 8) ? *(u32 *)(data + 4) : 0, *(u32 *)data);
308*4882a593Smuzhiyun 
309*4882a593Smuzhiyun 	return 0;
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun #define RD_REGP(regp, regname, port)	\
313*4882a593Smuzhiyun 	ops->rd_reg(port,					\
314*4882a593Smuzhiyun 		regname##_offset,				\
315*4882a593Smuzhiyun 		regname##_r_opcode,				\
316*4882a593Smuzhiyun 		regp, sizeof(struct regname),	\
317*4882a593Smuzhiyun 		#regname)
318*4882a593Smuzhiyun 
319*4882a593Smuzhiyun #define RD_REG(regp, regname)			\
320*4882a593Smuzhiyun 	ops->rd_reg(regname ## _port,		\
321*4882a593Smuzhiyun 		regname##_offset,				\
322*4882a593Smuzhiyun 		regname##_r_opcode,				\
323*4882a593Smuzhiyun 		regp, sizeof(struct regname),	\
324*4882a593Smuzhiyun 		#regname)
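/*
 * Example expansion (hedged - the *_port/*_offset/*_r_opcode descriptor
 * constants are assumed to come from pnd2_edac.h):
 *   RD_REG(&tolud, b_cr_tolud_pci)
 * becomes
 *   ops->rd_reg(b_cr_tolud_pci_port, b_cr_tolud_pci_offset,
 *               b_cr_tolud_pci_r_opcode, &tolud,
 *               sizeof(struct b_cr_tolud_pci), "b_cr_tolud_pci");
 */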
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun static u64 top_lm, top_hm;
327*4882a593Smuzhiyun static bool two_slices;
328*4882a593Smuzhiyun static bool two_channels; /* Both PMI channels in one slice enabled */
329*4882a593Smuzhiyun 
330*4882a593Smuzhiyun static u8 sym_chan_mask;
331*4882a593Smuzhiyun static u8 asym_chan_mask;
332*4882a593Smuzhiyun static u8 chan_mask;
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun static int slice_selector = -1;
335*4882a593Smuzhiyun static int chan_selector = -1;
336*4882a593Smuzhiyun static u64 slice_hash_mask;
337*4882a593Smuzhiyun static u64 chan_hash_mask;
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun static void mk_region(char *name, struct region *rp, u64 base, u64 limit)
340*4882a593Smuzhiyun {
341*4882a593Smuzhiyun 	rp->enabled = 1;
342*4882a593Smuzhiyun 	rp->base = base;
343*4882a593Smuzhiyun 	rp->limit = limit;
344*4882a593Smuzhiyun 	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, limit);
345*4882a593Smuzhiyun }
346*4882a593Smuzhiyun 
347*4882a593Smuzhiyun static void mk_region_mask(char *name, struct region *rp, u64 base, u64 mask)
348*4882a593Smuzhiyun {
349*4882a593Smuzhiyun 	if (mask == 0) {
350*4882a593Smuzhiyun 		pr_info(FW_BUG "MOT mask cannot be zero\n");
351*4882a593Smuzhiyun 		return;
352*4882a593Smuzhiyun 	}
353*4882a593Smuzhiyun 	if (mask != GENMASK_ULL(PND_MAX_PHYS_BIT, __ffs(mask))) {
354*4882a593Smuzhiyun 		pr_info(FW_BUG "MOT mask not power of two\n");
355*4882a593Smuzhiyun 		return;
356*4882a593Smuzhiyun 	}
357*4882a593Smuzhiyun 	if (base & ~mask) {
358*4882a593Smuzhiyun 		pr_info(FW_BUG "MOT region base/mask alignment error\n");
359*4882a593Smuzhiyun 		return;
360*4882a593Smuzhiyun 	}
361*4882a593Smuzhiyun 	rp->base = base;
362*4882a593Smuzhiyun 	rp->limit = (base | ~mask) & GENMASK_ULL(PND_MAX_PHYS_BIT, 0);
363*4882a593Smuzhiyun 	rp->enabled = 1;
364*4882a593Smuzhiyun 	edac_dbg(2, "Region:%s [%llx, %llx]\n", name, base, rp->limit);
365*4882a593Smuzhiyun }
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun static bool in_region(struct region *rp, u64 addr)
368*4882a593Smuzhiyun {
369*4882a593Smuzhiyun 	if (!rp->enabled)
370*4882a593Smuzhiyun 		return false;
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 	return rp->base <= addr && addr <= rp->limit;
373*4882a593Smuzhiyun }
374*4882a593Smuzhiyun 
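/*
 * In the channel masks built below, bit 0/1 = slice 0 channel 0/1 and
 * bit 2/3 = slice 1 channel 0/1; e.g. a mask of 0x5 selects channel 0 of
 * each slice.
 */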
375*4882a593Smuzhiyun static int gen_sym_mask(struct b_cr_slice_channel_hash *p)
376*4882a593Smuzhiyun {
377*4882a593Smuzhiyun 	int mask = 0;
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 	if (!p->slice_0_mem_disabled)
380*4882a593Smuzhiyun 		mask |= p->sym_slice0_channel_enabled;
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 	if (!p->slice_1_disabled)
383*4882a593Smuzhiyun 		mask |= p->sym_slice1_channel_enabled << 2;
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun 	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
386*4882a593Smuzhiyun 		mask &= 0x5;
387*4882a593Smuzhiyun 
388*4882a593Smuzhiyun 	return mask;
389*4882a593Smuzhiyun }
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun static int gen_asym_mask(struct b_cr_slice_channel_hash *p,
392*4882a593Smuzhiyun 			 struct b_cr_asym_mem_region0_mchbar *as0,
393*4882a593Smuzhiyun 			 struct b_cr_asym_mem_region1_mchbar *as1,
394*4882a593Smuzhiyun 			 struct b_cr_asym_2way_mem_region_mchbar *as2way)
395*4882a593Smuzhiyun {
396*4882a593Smuzhiyun 	const int intlv[] = { 0x5, 0xA, 0x3, 0xC };
397*4882a593Smuzhiyun 	int mask = 0;
398*4882a593Smuzhiyun 
399*4882a593Smuzhiyun 	if (as2way->asym_2way_interleave_enable)
400*4882a593Smuzhiyun 		mask = intlv[as2way->asym_2way_intlv_mode];
401*4882a593Smuzhiyun 	if (as0->slice0_asym_enable)
402*4882a593Smuzhiyun 		mask |= (1 << as0->slice0_asym_channel_select);
403*4882a593Smuzhiyun 	if (as1->slice1_asym_enable)
404*4882a593Smuzhiyun 		mask |= (4 << as1->slice1_asym_channel_select);
405*4882a593Smuzhiyun 	if (p->slice_0_mem_disabled)
406*4882a593Smuzhiyun 		mask &= 0xc;
407*4882a593Smuzhiyun 	if (p->slice_1_disabled)
408*4882a593Smuzhiyun 		mask &= 0x3;
409*4882a593Smuzhiyun 	if (p->ch_1_disabled || p->enable_pmi_dual_data_mode)
410*4882a593Smuzhiyun 		mask &= 0x5;
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun 	return mask;
413*4882a593Smuzhiyun }
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun static struct b_cr_tolud_pci tolud;
416*4882a593Smuzhiyun static struct b_cr_touud_lo_pci touud_lo;
417*4882a593Smuzhiyun static struct b_cr_touud_hi_pci touud_hi;
418*4882a593Smuzhiyun static struct b_cr_asym_mem_region0_mchbar asym0;
419*4882a593Smuzhiyun static struct b_cr_asym_mem_region1_mchbar asym1;
420*4882a593Smuzhiyun static struct b_cr_asym_2way_mem_region_mchbar asym_2way;
421*4882a593Smuzhiyun static struct b_cr_mot_out_base_mchbar mot_base;
422*4882a593Smuzhiyun static struct b_cr_mot_out_mask_mchbar mot_mask;
423*4882a593Smuzhiyun static struct b_cr_slice_channel_hash chash;
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun /* Apollo Lake dunit */
426*4882a593Smuzhiyun /*
427*4882a593Smuzhiyun  * Validated on a board with just two DIMMs in the [0] and [2] positions
428*4882a593Smuzhiyun  * in this array. The other port numbers match the documentation, but
429*4882a593Smuzhiyun  * caution is advised.
430*4882a593Smuzhiyun  */
431*4882a593Smuzhiyun static const int apl_dports[APL_NUM_CHANNELS] = { 0x18, 0x10, 0x11, 0x19 };
432*4882a593Smuzhiyun static struct d_cr_drp0 drp0[APL_NUM_CHANNELS];
433*4882a593Smuzhiyun 
434*4882a593Smuzhiyun /* Denverton dunit */
435*4882a593Smuzhiyun static const int dnv_dports[DNV_NUM_CHANNELS] = { 0x10, 0x12 };
436*4882a593Smuzhiyun static struct d_cr_dsch dsch;
437*4882a593Smuzhiyun static struct d_cr_ecc_ctrl ecc_ctrl[DNV_NUM_CHANNELS];
438*4882a593Smuzhiyun static struct d_cr_drp drp[DNV_NUM_CHANNELS];
439*4882a593Smuzhiyun static struct d_cr_dmap dmap[DNV_NUM_CHANNELS];
440*4882a593Smuzhiyun static struct d_cr_dmap1 dmap1[DNV_NUM_CHANNELS];
441*4882a593Smuzhiyun static struct d_cr_dmap2 dmap2[DNV_NUM_CHANNELS];
442*4882a593Smuzhiyun static struct d_cr_dmap3 dmap3[DNV_NUM_CHANNELS];
443*4882a593Smuzhiyun static struct d_cr_dmap4 dmap4[DNV_NUM_CHANNELS];
444*4882a593Smuzhiyun static struct d_cr_dmap5 dmap5[DNV_NUM_CHANNELS];
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun static void apl_mk_region(char *name, struct region *rp, void *asym)
447*4882a593Smuzhiyun {
448*4882a593Smuzhiyun 	struct b_cr_asym_mem_region0_mchbar *a = asym;
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 	mk_region(name, rp,
451*4882a593Smuzhiyun 			  U64_LSHIFT(a->slice0_asym_base, APL_ASYMSHIFT),
452*4882a593Smuzhiyun 			  U64_LSHIFT(a->slice0_asym_limit, APL_ASYMSHIFT) +
453*4882a593Smuzhiyun 			  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
454*4882a593Smuzhiyun }
455*4882a593Smuzhiyun 
456*4882a593Smuzhiyun static void dnv_mk_region(char *name, struct region *rp, void *asym)
457*4882a593Smuzhiyun {
458*4882a593Smuzhiyun 	struct b_cr_asym_mem_region_denverton *a = asym;
459*4882a593Smuzhiyun 
460*4882a593Smuzhiyun 	mk_region(name, rp,
461*4882a593Smuzhiyun 			  U64_LSHIFT(a->slice_asym_base, DNV_ASYMSHIFT),
462*4882a593Smuzhiyun 			  U64_LSHIFT(a->slice_asym_limit, DNV_ASYMSHIFT) +
463*4882a593Smuzhiyun 			  GENMASK_ULL(DNV_ASYMSHIFT - 1, 0));
464*4882a593Smuzhiyun }
465*4882a593Smuzhiyun 
466*4882a593Smuzhiyun static int apl_get_registers(void)
467*4882a593Smuzhiyun {
468*4882a593Smuzhiyun 	int ret = -ENODEV;
469*4882a593Smuzhiyun 	int i;
470*4882a593Smuzhiyun 
471*4882a593Smuzhiyun 	if (RD_REG(&asym_2way, b_cr_asym_2way_mem_region_mchbar))
472*4882a593Smuzhiyun 		return -ENODEV;
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun 	/*
475*4882a593Smuzhiyun 	 * RD_REGP() will fail for unpopulated or non-existent
476*4882a593Smuzhiyun 	 * DIMM slots. Return success if we find at least one DIMM.
477*4882a593Smuzhiyun 	 */
478*4882a593Smuzhiyun 	for (i = 0; i < APL_NUM_CHANNELS; i++)
479*4882a593Smuzhiyun 		if (!RD_REGP(&drp0[i], d_cr_drp0, apl_dports[i]))
480*4882a593Smuzhiyun 			ret = 0;
481*4882a593Smuzhiyun 
482*4882a593Smuzhiyun 	return ret;
483*4882a593Smuzhiyun }
484*4882a593Smuzhiyun 
485*4882a593Smuzhiyun static int dnv_get_registers(void)
486*4882a593Smuzhiyun {
487*4882a593Smuzhiyun 	int i;
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun 	if (RD_REG(&dsch, d_cr_dsch))
490*4882a593Smuzhiyun 		return -ENODEV;
491*4882a593Smuzhiyun 
492*4882a593Smuzhiyun 	for (i = 0; i < DNV_NUM_CHANNELS; i++)
493*4882a593Smuzhiyun 		if (RD_REGP(&ecc_ctrl[i], d_cr_ecc_ctrl, dnv_dports[i]) ||
494*4882a593Smuzhiyun 			RD_REGP(&drp[i], d_cr_drp, dnv_dports[i]) ||
495*4882a593Smuzhiyun 			RD_REGP(&dmap[i], d_cr_dmap, dnv_dports[i]) ||
496*4882a593Smuzhiyun 			RD_REGP(&dmap1[i], d_cr_dmap1, dnv_dports[i]) ||
497*4882a593Smuzhiyun 			RD_REGP(&dmap2[i], d_cr_dmap2, dnv_dports[i]) ||
498*4882a593Smuzhiyun 			RD_REGP(&dmap3[i], d_cr_dmap3, dnv_dports[i]) ||
499*4882a593Smuzhiyun 			RD_REGP(&dmap4[i], d_cr_dmap4, dnv_dports[i]) ||
500*4882a593Smuzhiyun 			RD_REGP(&dmap5[i], d_cr_dmap5, dnv_dports[i]))
501*4882a593Smuzhiyun 			return -ENODEV;
502*4882a593Smuzhiyun 
503*4882a593Smuzhiyun 	return 0;
504*4882a593Smuzhiyun }
505*4882a593Smuzhiyun 
506*4882a593Smuzhiyun /*
507*4882a593Smuzhiyun  * Read all the h/w config registers once here (they don't
508*4882a593Smuzhiyun  * change at run time). Figure out which address ranges have
509*4882a593Smuzhiyun  * which interleave characteristics.
510*4882a593Smuzhiyun  */
511*4882a593Smuzhiyun static int get_registers(void)
512*4882a593Smuzhiyun {
513*4882a593Smuzhiyun 	const int intlv[] = { 10, 11, 12, 12 };
514*4882a593Smuzhiyun 
515*4882a593Smuzhiyun 	if (RD_REG(&tolud, b_cr_tolud_pci) ||
516*4882a593Smuzhiyun 		RD_REG(&touud_lo, b_cr_touud_lo_pci) ||
517*4882a593Smuzhiyun 		RD_REG(&touud_hi, b_cr_touud_hi_pci) ||
518*4882a593Smuzhiyun 		RD_REG(&asym0, b_cr_asym_mem_region0_mchbar) ||
519*4882a593Smuzhiyun 		RD_REG(&asym1, b_cr_asym_mem_region1_mchbar) ||
520*4882a593Smuzhiyun 		RD_REG(&mot_base, b_cr_mot_out_base_mchbar) ||
521*4882a593Smuzhiyun 		RD_REG(&mot_mask, b_cr_mot_out_mask_mchbar) ||
522*4882a593Smuzhiyun 		RD_REG(&chash, b_cr_slice_channel_hash))
523*4882a593Smuzhiyun 		return -ENODEV;
524*4882a593Smuzhiyun 
525*4882a593Smuzhiyun 	if (ops->get_registers())
526*4882a593Smuzhiyun 		return -ENODEV;
527*4882a593Smuzhiyun 
528*4882a593Smuzhiyun 	if (ops->type == DNV) {
529*4882a593Smuzhiyun 		/* PMI channel idx (always 0) for asymmetric region */
530*4882a593Smuzhiyun 		asym0.slice0_asym_channel_select = 0;
531*4882a593Smuzhiyun 		asym1.slice1_asym_channel_select = 0;
532*4882a593Smuzhiyun 		/* PMI channel bitmap (always 1) for symmetric region */
533*4882a593Smuzhiyun 		chash.sym_slice0_channel_enabled = 0x1;
534*4882a593Smuzhiyun 		chash.sym_slice1_channel_enabled = 0x1;
535*4882a593Smuzhiyun 	}
536*4882a593Smuzhiyun 
537*4882a593Smuzhiyun 	if (asym0.slice0_asym_enable)
538*4882a593Smuzhiyun 		ops->mk_region("as0", &as0, &asym0);
539*4882a593Smuzhiyun 
540*4882a593Smuzhiyun 	if (asym1.slice1_asym_enable)
541*4882a593Smuzhiyun 		ops->mk_region("as1", &as1, &asym1);
542*4882a593Smuzhiyun 
543*4882a593Smuzhiyun 	if (asym_2way.asym_2way_interleave_enable) {
544*4882a593Smuzhiyun 		mk_region("as2way", &as2,
545*4882a593Smuzhiyun 				  U64_LSHIFT(asym_2way.asym_2way_base, APL_ASYMSHIFT),
546*4882a593Smuzhiyun 				  U64_LSHIFT(asym_2way.asym_2way_limit, APL_ASYMSHIFT) +
547*4882a593Smuzhiyun 				  GENMASK_ULL(APL_ASYMSHIFT - 1, 0));
548*4882a593Smuzhiyun 	}
549*4882a593Smuzhiyun 
550*4882a593Smuzhiyun 	if (mot_base.imr_en) {
551*4882a593Smuzhiyun 		mk_region_mask("mot", &mot,
552*4882a593Smuzhiyun 					   U64_LSHIFT(mot_base.mot_out_base, MOT_SHIFT),
553*4882a593Smuzhiyun 					   U64_LSHIFT(mot_mask.mot_out_mask, MOT_SHIFT));
554*4882a593Smuzhiyun 	}
555*4882a593Smuzhiyun 
556*4882a593Smuzhiyun 	top_lm = U64_LSHIFT(tolud.tolud, 20);
557*4882a593Smuzhiyun 	top_hm = U64_LSHIFT(touud_hi.touud, 32) | U64_LSHIFT(touud_lo.touud, 20);
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun 	two_slices = !chash.slice_1_disabled &&
560*4882a593Smuzhiyun 				 !chash.slice_0_mem_disabled &&
561*4882a593Smuzhiyun 				 (chash.sym_slice0_channel_enabled != 0) &&
562*4882a593Smuzhiyun 				 (chash.sym_slice1_channel_enabled != 0);
563*4882a593Smuzhiyun 	two_channels = !chash.ch_1_disabled &&
564*4882a593Smuzhiyun 				 !chash.enable_pmi_dual_data_mode &&
565*4882a593Smuzhiyun 				 ((chash.sym_slice0_channel_enabled == 3) ||
566*4882a593Smuzhiyun 				 (chash.sym_slice1_channel_enabled == 3));
567*4882a593Smuzhiyun 
568*4882a593Smuzhiyun 	sym_chan_mask = gen_sym_mask(&chash);
569*4882a593Smuzhiyun 	asym_chan_mask = gen_asym_mask(&chash, &asym0, &asym1, &asym_2way);
570*4882a593Smuzhiyun 	chan_mask = sym_chan_mask | asym_chan_mask;
571*4882a593Smuzhiyun 
572*4882a593Smuzhiyun 	if (two_slices && !two_channels) {
573*4882a593Smuzhiyun 		if (chash.hvm_mode)
574*4882a593Smuzhiyun 			slice_selector = 29;
575*4882a593Smuzhiyun 		else
576*4882a593Smuzhiyun 			slice_selector = intlv[chash.interleave_mode];
577*4882a593Smuzhiyun 	} else if (!two_slices && two_channels) {
578*4882a593Smuzhiyun 		if (chash.hvm_mode)
579*4882a593Smuzhiyun 			chan_selector = 29;
580*4882a593Smuzhiyun 		else
581*4882a593Smuzhiyun 			chan_selector = intlv[chash.interleave_mode];
582*4882a593Smuzhiyun 	} else if (two_slices && two_channels) {
583*4882a593Smuzhiyun 		if (chash.hvm_mode) {
584*4882a593Smuzhiyun 			slice_selector = 29;
585*4882a593Smuzhiyun 			chan_selector = 30;
586*4882a593Smuzhiyun 		} else {
587*4882a593Smuzhiyun 			slice_selector = intlv[chash.interleave_mode];
588*4882a593Smuzhiyun 			chan_selector = intlv[chash.interleave_mode] + 1;
589*4882a593Smuzhiyun 		}
590*4882a593Smuzhiyun 	}
591*4882a593Smuzhiyun 
592*4882a593Smuzhiyun 	if (two_slices) {
593*4882a593Smuzhiyun 		if (!chash.hvm_mode)
594*4882a593Smuzhiyun 			slice_hash_mask = chash.slice_hash_mask << SLICE_HASH_MASK_LSB;
595*4882a593Smuzhiyun 		if (!two_channels)
596*4882a593Smuzhiyun 			slice_hash_mask |= BIT_ULL(slice_selector);
597*4882a593Smuzhiyun 	}
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 	if (two_channels) {
600*4882a593Smuzhiyun 		if (!chash.hvm_mode)
601*4882a593Smuzhiyun 			chan_hash_mask = chash.ch_hash_mask << CH_HASH_MASK_LSB;
602*4882a593Smuzhiyun 		if (!two_slices)
603*4882a593Smuzhiyun 			chan_hash_mask |= BIT_ULL(chan_selector);
604*4882a593Smuzhiyun 	}
605*4882a593Smuzhiyun 
606*4882a593Smuzhiyun 	return 0;
607*4882a593Smuzhiyun }
608*4882a593Smuzhiyun 
609*4882a593Smuzhiyun /* Get a contiguous memory address (remove the MMIO gap) */
610*4882a593Smuzhiyun static u64 remove_mmio_gap(u64 sys)
611*4882a593Smuzhiyun {
612*4882a593Smuzhiyun 	return (sys < _4GB) ? sys : sys - (_4GB - top_lm);
613*4882a593Smuzhiyun }
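/*
 * Illustrative example (values are hypothetical): with top_lm = 2GB, a system
 * address of 5GB sits above the MMIO gap and maps to 5GB - (4GB - 2GB) = 3GB
 * in the contiguous DRAM address space.
 */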
614*4882a593Smuzhiyun 
615*4882a593Smuzhiyun /* Squeeze out one address bit, shift upper part down to fill gap */
616*4882a593Smuzhiyun static void remove_addr_bit(u64 *addr, int bitidx)
617*4882a593Smuzhiyun {
618*4882a593Smuzhiyun 	u64	mask;
619*4882a593Smuzhiyun 
620*4882a593Smuzhiyun 	if (bitidx == -1)
621*4882a593Smuzhiyun 		return;
622*4882a593Smuzhiyun 
623*4882a593Smuzhiyun 	mask = (1ull << bitidx) - 1;
624*4882a593Smuzhiyun 	*addr = ((*addr >> 1) & ~mask) | (*addr & mask);
625*4882a593Smuzhiyun }
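/*
 * Illustrative example (not from the original source): removing bit 12 from
 * 0x3f00 keeps bits 11..0 (0xf00) and shifts the higher bits down one place,
 * giving 0x1f00.
 */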
626*4882a593Smuzhiyun 
627*4882a593Smuzhiyun /* XOR all the bits from addr specified in mask */
628*4882a593Smuzhiyun static int hash_by_mask(u64 addr, u64 mask)
629*4882a593Smuzhiyun {
630*4882a593Smuzhiyun 	u64 result = addr & mask;
631*4882a593Smuzhiyun 
632*4882a593Smuzhiyun 	result = (result >> 32) ^ result;
633*4882a593Smuzhiyun 	result = (result >> 16) ^ result;
634*4882a593Smuzhiyun 	result = (result >> 8) ^ result;
635*4882a593Smuzhiyun 	result = (result >> 4) ^ result;
636*4882a593Smuzhiyun 	result = (result >> 2) ^ result;
637*4882a593Smuzhiyun 	result = (result >> 1) ^ result;
638*4882a593Smuzhiyun 
639*4882a593Smuzhiyun 	return (int)result & 1;
640*4882a593Smuzhiyun }
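/*
 * i.e. the parity of the selected bits. Example: with mask 0xA the result is
 * bit 1 ^ bit 3 of addr, so hash_by_mask(0x8, 0xA) == 1 and
 * hash_by_mask(0xA, 0xA) == 0.
 */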
641*4882a593Smuzhiyun 
642*4882a593Smuzhiyun /*
643*4882a593Smuzhiyun  * First stage decode. Take the system address and figure out which
644*4882a593Smuzhiyun  * second stage will deal with it based on interleave modes.
645*4882a593Smuzhiyun  */
646*4882a593Smuzhiyun static int sys2pmi(const u64 addr, u32 *pmiidx, u64 *pmiaddr, char *msg)
647*4882a593Smuzhiyun {
648*4882a593Smuzhiyun 	u64 contig_addr, contig_base, contig_offset, contig_base_adj;
649*4882a593Smuzhiyun 	int mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
650*4882a593Smuzhiyun 						MOT_CHAN_INTLV_BIT_1SLC_2CH;
651*4882a593Smuzhiyun 	int slice_intlv_bit_rm = SELECTOR_DISABLED;
652*4882a593Smuzhiyun 	int chan_intlv_bit_rm = SELECTOR_DISABLED;
653*4882a593Smuzhiyun 	/* Determine if address is in the MOT region. */
654*4882a593Smuzhiyun 	bool mot_hit = in_region(&mot, addr);
655*4882a593Smuzhiyun 	/* Calculate the number of symmetric channels enabled. */
656*4882a593Smuzhiyun 	int sym_channels = hweight8(sym_chan_mask);
657*4882a593Smuzhiyun 
658*4882a593Smuzhiyun 	/*
659*4882a593Smuzhiyun 	 * The amount we need to shift the asym base can be determined by the
660*4882a593Smuzhiyun 	 * number of enabled symmetric channels.
661*4882a593Smuzhiyun 	 * NOTE: This can only work because symmetric memory is not supposed
662*4882a593Smuzhiyun 	 * to do a 3-way interleave.
663*4882a593Smuzhiyun 	 */
664*4882a593Smuzhiyun 	int sym_chan_shift = sym_channels >> 1;
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun 	/* Give up if address is out of range, or in MMIO gap */
667*4882a593Smuzhiyun 	if (addr >= (1ul << PND_MAX_PHYS_BIT) ||
668*4882a593Smuzhiyun 	   (addr >= top_lm && addr < _4GB) || addr >= top_hm) {
669*4882a593Smuzhiyun 		snprintf(msg, PND2_MSG_SIZE, "Error address 0x%llx is not DRAM", addr);
670*4882a593Smuzhiyun 		return -EINVAL;
671*4882a593Smuzhiyun 	}
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun 	/* Get a contiguous memory address (remove the MMIO gap) */
674*4882a593Smuzhiyun 	contig_addr = remove_mmio_gap(addr);
675*4882a593Smuzhiyun 
676*4882a593Smuzhiyun 	if (in_region(&as0, addr)) {
677*4882a593Smuzhiyun 		*pmiidx = asym0.slice0_asym_channel_select;
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun 		contig_base = remove_mmio_gap(as0.base);
680*4882a593Smuzhiyun 		contig_offset = contig_addr - contig_base;
681*4882a593Smuzhiyun 		contig_base_adj = (contig_base >> sym_chan_shift) *
682*4882a593Smuzhiyun 						  ((chash.sym_slice0_channel_enabled >> (*pmiidx & 1)) & 1);
683*4882a593Smuzhiyun 		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
684*4882a593Smuzhiyun 	} else if (in_region(&as1, addr)) {
685*4882a593Smuzhiyun 		*pmiidx = 2u + asym1.slice1_asym_channel_select;
686*4882a593Smuzhiyun 
687*4882a593Smuzhiyun 		contig_base = remove_mmio_gap(as1.base);
688*4882a593Smuzhiyun 		contig_offset = contig_addr - contig_base;
689*4882a593Smuzhiyun 		contig_base_adj = (contig_base >> sym_chan_shift) *
690*4882a593Smuzhiyun 						  ((chash.sym_slice1_channel_enabled >> (*pmiidx & 1)) & 1);
691*4882a593Smuzhiyun 		contig_addr = contig_offset + ((sym_channels > 0) ? contig_base_adj : 0ull);
692*4882a593Smuzhiyun 	} else if (in_region(&as2, addr) && (asym_2way.asym_2way_intlv_mode == 0x3ul)) {
693*4882a593Smuzhiyun 		bool channel1;
694*4882a593Smuzhiyun 
695*4882a593Smuzhiyun 		mot_intlv_bit = MOT_CHAN_INTLV_BIT_1SLC_2CH;
696*4882a593Smuzhiyun 		*pmiidx = (asym_2way.asym_2way_intlv_mode & 1) << 1;
697*4882a593Smuzhiyun 		channel1 = mot_hit ? ((bool)((addr >> mot_intlv_bit) & 1)) :
698*4882a593Smuzhiyun 			hash_by_mask(contig_addr, chan_hash_mask);
699*4882a593Smuzhiyun 		*pmiidx |= (u32)channel1;
700*4882a593Smuzhiyun 
701*4882a593Smuzhiyun 		contig_base = remove_mmio_gap(as2.base);
702*4882a593Smuzhiyun 		chan_intlv_bit_rm = mot_hit ? mot_intlv_bit : chan_selector;
703*4882a593Smuzhiyun 		contig_offset = contig_addr - contig_base;
704*4882a593Smuzhiyun 		remove_addr_bit(&contig_offset, chan_intlv_bit_rm);
705*4882a593Smuzhiyun 		contig_addr = (contig_base >> sym_chan_shift) + contig_offset;
706*4882a593Smuzhiyun 	} else {
707*4882a593Smuzhiyun 		/* Otherwise we're in normal, boring symmetric mode. */
708*4882a593Smuzhiyun 		*pmiidx = 0u;
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun 		if (two_slices) {
711*4882a593Smuzhiyun 			bool slice1;
712*4882a593Smuzhiyun 
713*4882a593Smuzhiyun 			if (mot_hit) {
714*4882a593Smuzhiyun 				slice_intlv_bit_rm = MOT_SLC_INTLV_BIT;
715*4882a593Smuzhiyun 				slice1 = (addr >> MOT_SLC_INTLV_BIT) & 1;
716*4882a593Smuzhiyun 			} else {
717*4882a593Smuzhiyun 				slice_intlv_bit_rm = slice_selector;
718*4882a593Smuzhiyun 				slice1 = hash_by_mask(addr, slice_hash_mask);
719*4882a593Smuzhiyun 			}
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun 			*pmiidx = (u32)slice1 << 1;
722*4882a593Smuzhiyun 		}
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 		if (two_channels) {
725*4882a593Smuzhiyun 			bool channel1;
726*4882a593Smuzhiyun 
727*4882a593Smuzhiyun 			mot_intlv_bit = two_slices ? MOT_CHAN_INTLV_BIT_2SLC_2CH :
728*4882a593Smuzhiyun 							MOT_CHAN_INTLV_BIT_1SLC_2CH;
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 			if (mot_hit) {
731*4882a593Smuzhiyun 				chan_intlv_bit_rm = mot_intlv_bit;
732*4882a593Smuzhiyun 				channel1 = (addr >> mot_intlv_bit) & 1;
733*4882a593Smuzhiyun 			} else {
734*4882a593Smuzhiyun 				chan_intlv_bit_rm = chan_selector;
735*4882a593Smuzhiyun 				channel1 = hash_by_mask(contig_addr, chan_hash_mask);
736*4882a593Smuzhiyun 			}
737*4882a593Smuzhiyun 
738*4882a593Smuzhiyun 			*pmiidx |= (u32)channel1;
739*4882a593Smuzhiyun 		}
740*4882a593Smuzhiyun 	}
741*4882a593Smuzhiyun 
742*4882a593Smuzhiyun 	/* Remove the chan_selector bit first */
743*4882a593Smuzhiyun 	remove_addr_bit(&contig_addr, chan_intlv_bit_rm);
744*4882a593Smuzhiyun 	/* Remove the slice bit (we remove it second because it must be lower) */
745*4882a593Smuzhiyun 	remove_addr_bit(&contig_addr, slice_intlv_bit_rm);
746*4882a593Smuzhiyun 	*pmiaddr = contig_addr;
747*4882a593Smuzhiyun 
748*4882a593Smuzhiyun 	return 0;
749*4882a593Smuzhiyun }
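/*
 * On return from sys2pmi(), *pmiidx encodes the target PMI channel (bit 1 =
 * slice, bit 0 = channel within the slice) and *pmiaddr is the contiguous
 * address with the consumed interleave bits squeezed out; the caller shifts
 * it right by ops->pmiaddr_shift before the second stage decode.
 */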
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun /* Translate PMI address to memory (rank, row, bank, column) */
752*4882a593Smuzhiyun #define C(n) (0x10 | (n))	/* column */
753*4882a593Smuzhiyun #define B(n) (0x20 | (n))	/* bank */
754*4882a593Smuzhiyun #define R(n) (0x40 | (n))	/* row */
755*4882a593Smuzhiyun #define RS   (0x80)			/* rank */
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun /* addrdec values */
758*4882a593Smuzhiyun #define AMAP_1KB	0
759*4882a593Smuzhiyun #define AMAP_2KB	1
760*4882a593Smuzhiyun #define AMAP_4KB	2
761*4882a593Smuzhiyun #define AMAP_RSVD	3
762*4882a593Smuzhiyun 
763*4882a593Smuzhiyun /* dden values */
764*4882a593Smuzhiyun #define DEN_4Gb		0
765*4882a593Smuzhiyun #define DEN_8Gb		2
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun /* dwid values */
768*4882a593Smuzhiyun #define X8		0
769*4882a593Smuzhiyun #define X16		1
770*4882a593Smuzhiyun 
771*4882a593Smuzhiyun static struct dimm_geometry {
772*4882a593Smuzhiyun 	u8	addrdec;
773*4882a593Smuzhiyun 	u8	dden;
774*4882a593Smuzhiyun 	u8	dwid;
775*4882a593Smuzhiyun 	u8	rowbits, colbits;
776*4882a593Smuzhiyun 	u16	bits[PMI_ADDRESS_WIDTH];
777*4882a593Smuzhiyun } dimms[] = {
778*4882a593Smuzhiyun 	{
779*4882a593Smuzhiyun 		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X16,
780*4882a593Smuzhiyun 		.rowbits = 15, .colbits = 10,
781*4882a593Smuzhiyun 		.bits = {
782*4882a593Smuzhiyun 			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
783*4882a593Smuzhiyun 			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
784*4882a593Smuzhiyun 			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
785*4882a593Smuzhiyun 			0,     0,     0,     0
786*4882a593Smuzhiyun 		}
787*4882a593Smuzhiyun 	},
788*4882a593Smuzhiyun 	{
789*4882a593Smuzhiyun 		.addrdec = AMAP_1KB, .dden = DEN_4Gb, .dwid = X8,
790*4882a593Smuzhiyun 		.rowbits = 16, .colbits = 10,
791*4882a593Smuzhiyun 		.bits = {
792*4882a593Smuzhiyun 			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
793*4882a593Smuzhiyun 			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
794*4882a593Smuzhiyun 			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
795*4882a593Smuzhiyun 			R(15), 0,     0,     0
796*4882a593Smuzhiyun 		}
797*4882a593Smuzhiyun 	},
798*4882a593Smuzhiyun 	{
799*4882a593Smuzhiyun 		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X16,
800*4882a593Smuzhiyun 		.rowbits = 16, .colbits = 10,
801*4882a593Smuzhiyun 		.bits = {
802*4882a593Smuzhiyun 			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
803*4882a593Smuzhiyun 			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
804*4882a593Smuzhiyun 			R(10), C(7),  C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
805*4882a593Smuzhiyun 			R(15), 0,     0,     0
806*4882a593Smuzhiyun 		}
807*4882a593Smuzhiyun 	},
808*4882a593Smuzhiyun 	{
809*4882a593Smuzhiyun 		.addrdec = AMAP_1KB, .dden = DEN_8Gb, .dwid = X8,
810*4882a593Smuzhiyun 		.rowbits = 16, .colbits = 11,
811*4882a593Smuzhiyun 		.bits = {
812*4882a593Smuzhiyun 			C(2),  C(3),  C(4),  C(5),  C(6),  B(0),  B(1),  B(2),  R(0),
813*4882a593Smuzhiyun 			R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),  R(9),
814*4882a593Smuzhiyun 			R(10), C(7),  C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
815*4882a593Smuzhiyun 			R(14), R(15), 0,     0
816*4882a593Smuzhiyun 		}
817*4882a593Smuzhiyun 	},
818*4882a593Smuzhiyun 	{
819*4882a593Smuzhiyun 		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X16,
820*4882a593Smuzhiyun 		.rowbits = 15, .colbits = 10,
821*4882a593Smuzhiyun 		.bits = {
822*4882a593Smuzhiyun 			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
823*4882a593Smuzhiyun 			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
824*4882a593Smuzhiyun 			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
825*4882a593Smuzhiyun 			0,     0,     0,     0
826*4882a593Smuzhiyun 		}
827*4882a593Smuzhiyun 	},
828*4882a593Smuzhiyun 	{
829*4882a593Smuzhiyun 		.addrdec = AMAP_2KB, .dden = DEN_4Gb, .dwid = X8,
830*4882a593Smuzhiyun 		.rowbits = 16, .colbits = 10,
831*4882a593Smuzhiyun 		.bits = {
832*4882a593Smuzhiyun 			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
833*4882a593Smuzhiyun 			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
834*4882a593Smuzhiyun 			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
835*4882a593Smuzhiyun 			R(15), 0,     0,     0
836*4882a593Smuzhiyun 		}
837*4882a593Smuzhiyun 	},
838*4882a593Smuzhiyun 	{
839*4882a593Smuzhiyun 		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X16,
840*4882a593Smuzhiyun 		.rowbits = 16, .colbits = 10,
841*4882a593Smuzhiyun 		.bits = {
842*4882a593Smuzhiyun 			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
843*4882a593Smuzhiyun 			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
844*4882a593Smuzhiyun 			R(9),  R(10), C(8),  C(9),  R(11), RS,    R(12), R(13), R(14),
845*4882a593Smuzhiyun 			R(15), 0,     0,     0
846*4882a593Smuzhiyun 		}
847*4882a593Smuzhiyun 	},
848*4882a593Smuzhiyun 	{
849*4882a593Smuzhiyun 		.addrdec = AMAP_2KB, .dden = DEN_8Gb, .dwid = X8,
850*4882a593Smuzhiyun 		.rowbits = 16, .colbits = 11,
851*4882a593Smuzhiyun 		.bits = {
852*4882a593Smuzhiyun 			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  B(0),  B(1),  B(2),
853*4882a593Smuzhiyun 			R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),  R(8),
854*4882a593Smuzhiyun 			R(9),  R(10), C(8),  C(9),  R(11), RS,    C(11), R(12), R(13),
855*4882a593Smuzhiyun 			R(14), R(15), 0,     0
856*4882a593Smuzhiyun 		}
857*4882a593Smuzhiyun 	},
858*4882a593Smuzhiyun 	{
859*4882a593Smuzhiyun 		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X16,
860*4882a593Smuzhiyun 		.rowbits = 15, .colbits = 10,
861*4882a593Smuzhiyun 		.bits = {
862*4882a593Smuzhiyun 			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
863*4882a593Smuzhiyun 			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
864*4882a593Smuzhiyun 			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
865*4882a593Smuzhiyun 			0,     0,     0,     0
866*4882a593Smuzhiyun 		}
867*4882a593Smuzhiyun 	},
868*4882a593Smuzhiyun 	{
869*4882a593Smuzhiyun 		.addrdec = AMAP_4KB, .dden = DEN_4Gb, .dwid = X8,
870*4882a593Smuzhiyun 		.rowbits = 16, .colbits = 10,
871*4882a593Smuzhiyun 		.bits = {
872*4882a593Smuzhiyun 			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
873*4882a593Smuzhiyun 			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
874*4882a593Smuzhiyun 			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
875*4882a593Smuzhiyun 			R(15), 0,     0,     0
876*4882a593Smuzhiyun 		}
877*4882a593Smuzhiyun 	},
878*4882a593Smuzhiyun 	{
879*4882a593Smuzhiyun 		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X16,
880*4882a593Smuzhiyun 		.rowbits = 16, .colbits = 10,
881*4882a593Smuzhiyun 		.bits = {
882*4882a593Smuzhiyun 			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
883*4882a593Smuzhiyun 			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
884*4882a593Smuzhiyun 			R(8),  R(9),  R(10), C(9),  R(11), RS,    R(12), R(13), R(14),
885*4882a593Smuzhiyun 			R(15), 0,     0,     0
886*4882a593Smuzhiyun 		}
887*4882a593Smuzhiyun 	},
888*4882a593Smuzhiyun 	{
889*4882a593Smuzhiyun 		.addrdec = AMAP_4KB, .dden = DEN_8Gb, .dwid = X8,
890*4882a593Smuzhiyun 		.rowbits = 16, .colbits = 11,
891*4882a593Smuzhiyun 		.bits = {
892*4882a593Smuzhiyun 			C(2),  C(3),  C(4),  C(5),  C(6),  C(7),  C(8),  B(0),  B(1),
893*4882a593Smuzhiyun 			B(2),  R(0),  R(1),  R(2),  R(3),  R(4),  R(5),  R(6),  R(7),
894*4882a593Smuzhiyun 			R(8),  R(9),  R(10), C(9),  R(11), RS,    C(11), R(12), R(13),
895*4882a593Smuzhiyun 			R(14), R(15), 0,     0
896*4882a593Smuzhiyun 		}
897*4882a593Smuzhiyun 	}
898*4882a593Smuzhiyun };
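/*
 * How to read the table above: bits[i] is the role of bit i of the
 * (granularity-shifted) PMI address. In the first entry, for example, PMI
 * bit 0 is column bit 2 (C(2)), PMI bit 5 is bank bit 0 (B(0)) and PMI
 * bit 23 is the rank select (RS). A zero entry ends the mapping.
 */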
899*4882a593Smuzhiyun 
900*4882a593Smuzhiyun static int bank_hash(u64 pmiaddr, int idx, int shft)
901*4882a593Smuzhiyun {
902*4882a593Smuzhiyun 	int bhash = 0;
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun 	switch (idx) {
905*4882a593Smuzhiyun 	case 0:
906*4882a593Smuzhiyun 		bhash ^= ((pmiaddr >> (12 + shft)) ^ (pmiaddr >> (9 + shft))) & 1;
907*4882a593Smuzhiyun 		break;
908*4882a593Smuzhiyun 	case 1:
909*4882a593Smuzhiyun 		bhash ^= (((pmiaddr >> (10 + shft)) ^ (pmiaddr >> (8 + shft))) & 1) << 1;
910*4882a593Smuzhiyun 		bhash ^= ((pmiaddr >> 22) & 1) << 1;
911*4882a593Smuzhiyun 		break;
912*4882a593Smuzhiyun 	case 2:
913*4882a593Smuzhiyun 		bhash ^= (((pmiaddr >> (13 + shft)) ^ (pmiaddr >> (11 + shft))) & 1) << 2;
914*4882a593Smuzhiyun 		break;
915*4882a593Smuzhiyun 	}
916*4882a593Smuzhiyun 
917*4882a593Smuzhiyun 	return bhash;
918*4882a593Smuzhiyun }
919*4882a593Smuzhiyun 
920*4882a593Smuzhiyun static int rank_hash(u64 pmiaddr)
921*4882a593Smuzhiyun {
922*4882a593Smuzhiyun 	return ((pmiaddr >> 16) ^ (pmiaddr >> 10)) & 1;
923*4882a593Smuzhiyun }
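/*
 * bank_hash() and rank_hash() fold higher address bits into the bank and
 * rank selects. apl_pmi2mem() applies them only when the corresponding
 * enables (bahen, rsien) are set in the channel's d_cr_drp0 register.
 */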
924*4882a593Smuzhiyun 
925*4882a593Smuzhiyun /* Second stage decode. Compute rank, bank, row & column. */
926*4882a593Smuzhiyun static int apl_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
927*4882a593Smuzhiyun 		       struct dram_addr *daddr, char *msg)
928*4882a593Smuzhiyun {
929*4882a593Smuzhiyun 	struct d_cr_drp0 *cr_drp0 = &drp0[pmiidx];
930*4882a593Smuzhiyun 	struct pnd2_pvt *pvt = mci->pvt_info;
931*4882a593Smuzhiyun 	int g = pvt->dimm_geom[pmiidx];
932*4882a593Smuzhiyun 	struct dimm_geometry *d = &dimms[g];
933*4882a593Smuzhiyun 	int column = 0, bank = 0, row = 0, rank = 0;
934*4882a593Smuzhiyun 	int i, idx, type, skiprs = 0;
935*4882a593Smuzhiyun 
936*4882a593Smuzhiyun 	for (i = 0; i < PMI_ADDRESS_WIDTH; i++) {
937*4882a593Smuzhiyun 		int	bit = (pmiaddr >> i) & 1;
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun 		if (i + skiprs >= PMI_ADDRESS_WIDTH) {
940*4882a593Smuzhiyun 			snprintf(msg, PND2_MSG_SIZE, "Bad dimm_geometry[] table\n");
941*4882a593Smuzhiyun 			return -EINVAL;
942*4882a593Smuzhiyun 		}
943*4882a593Smuzhiyun 
944*4882a593Smuzhiyun 		type = d->bits[i + skiprs] & ~0xf;
945*4882a593Smuzhiyun 		idx = d->bits[i + skiprs] & 0xf;
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun 		/*
948*4882a593Smuzhiyun 		 * On single rank DIMMs ignore the rank select bit
949*4882a593Smuzhiyun 		 * and shift remainder of "bits[]" down one place.
950*4882a593Smuzhiyun 		 */
951*4882a593Smuzhiyun 		if (type == RS && (cr_drp0->rken0 + cr_drp0->rken1) == 1) {
952*4882a593Smuzhiyun 			skiprs = 1;
953*4882a593Smuzhiyun 			type = d->bits[i + skiprs] & ~0xf;
954*4882a593Smuzhiyun 			idx = d->bits[i + skiprs] & 0xf;
955*4882a593Smuzhiyun 		}
956*4882a593Smuzhiyun 
957*4882a593Smuzhiyun 		switch (type) {
958*4882a593Smuzhiyun 		case C(0):
959*4882a593Smuzhiyun 			column |= (bit << idx);
960*4882a593Smuzhiyun 			break;
961*4882a593Smuzhiyun 		case B(0):
962*4882a593Smuzhiyun 			bank |= (bit << idx);
963*4882a593Smuzhiyun 			if (cr_drp0->bahen)
964*4882a593Smuzhiyun 				bank ^= bank_hash(pmiaddr, idx, d->addrdec);
965*4882a593Smuzhiyun 			break;
966*4882a593Smuzhiyun 		case R(0):
967*4882a593Smuzhiyun 			row |= (bit << idx);
968*4882a593Smuzhiyun 			break;
969*4882a593Smuzhiyun 		case RS:
970*4882a593Smuzhiyun 			rank = bit;
971*4882a593Smuzhiyun 			if (cr_drp0->rsien)
972*4882a593Smuzhiyun 				rank ^= rank_hash(pmiaddr);
973*4882a593Smuzhiyun 			break;
974*4882a593Smuzhiyun 		default:
975*4882a593Smuzhiyun 			if (bit) {
976*4882a593Smuzhiyun 				snprintf(msg, PND2_MSG_SIZE, "Bad translation\n");
977*4882a593Smuzhiyun 				return -EINVAL;
978*4882a593Smuzhiyun 			}
979*4882a593Smuzhiyun 			goto done;
980*4882a593Smuzhiyun 		}
981*4882a593Smuzhiyun 	}
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun done:
984*4882a593Smuzhiyun 	daddr->col = column;
985*4882a593Smuzhiyun 	daddr->bank = bank;
986*4882a593Smuzhiyun 	daddr->row = row;
987*4882a593Smuzhiyun 	daddr->rank = rank;
988*4882a593Smuzhiyun 	daddr->dimm = 0;
989*4882a593Smuzhiyun 
990*4882a593Smuzhiyun 	return 0;
991*4882a593Smuzhiyun }
992*4882a593Smuzhiyun 
993*4882a593Smuzhiyun /* Pluck bit "in" from pmiaddr and return value shifted to bit "out" */
994*4882a593Smuzhiyun #define dnv_get_bit(pmi, in, out) ((int)(((pmi) >> (in)) & 1u) << (out))
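/* Example: dnv_get_bit(0x40, 6, 2) == 4 - bit 6 of pmiaddr moved to bit 2. */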
995*4882a593Smuzhiyun 
996*4882a593Smuzhiyun static int dnv_pmi2mem(struct mem_ctl_info *mci, u64 pmiaddr, u32 pmiidx,
997*4882a593Smuzhiyun 					   struct dram_addr *daddr, char *msg)
998*4882a593Smuzhiyun {
999*4882a593Smuzhiyun 	/* Rank 0 or 1 */
1000*4882a593Smuzhiyun 	daddr->rank = dnv_get_bit(pmiaddr, dmap[pmiidx].rs0 + 13, 0);
1001*4882a593Smuzhiyun 	/* Rank 2 or 3 */
1002*4882a593Smuzhiyun 	daddr->rank |= dnv_get_bit(pmiaddr, dmap[pmiidx].rs1 + 13, 1);
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun 	/*
1005*4882a593Smuzhiyun 	 * Normally ranks 0,1 are DIMM0, and 2,3 are DIMM1, but we
1006*4882a593Smuzhiyun 	 * flip them if DIMM1 is larger than DIMM0.
1007*4882a593Smuzhiyun 	 */
1008*4882a593Smuzhiyun 	daddr->dimm = (daddr->rank >= 2) ^ drp[pmiidx].dimmflip;
1009*4882a593Smuzhiyun 
1010*4882a593Smuzhiyun 	daddr->bank = dnv_get_bit(pmiaddr, dmap[pmiidx].ba0 + 6, 0);
1011*4882a593Smuzhiyun 	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].ba1 + 6, 1);
1012*4882a593Smuzhiyun 	daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg0 + 6, 2);
1013*4882a593Smuzhiyun 	if (dsch.ddr4en)
1014*4882a593Smuzhiyun 		daddr->bank |= dnv_get_bit(pmiaddr, dmap[pmiidx].bg1 + 6, 3);
1015*4882a593Smuzhiyun 	if (dmap1[pmiidx].bxor) {
1016*4882a593Smuzhiyun 		if (dsch.ddr4en) {
1017*4882a593Smuzhiyun 			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 0);
1018*4882a593Smuzhiyun 			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 1);
1019*4882a593Smuzhiyun 			if (dsch.chan_width == 0)
1020*4882a593Smuzhiyun 				/* 64/72 bit dram channel width */
1021*4882a593Smuzhiyun 				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
1022*4882a593Smuzhiyun 			else
1023*4882a593Smuzhiyun 				/* 32/40 bit dram channel width */
1024*4882a593Smuzhiyun 				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
1025*4882a593Smuzhiyun 			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 3);
1026*4882a593Smuzhiyun 		} else {
1027*4882a593Smuzhiyun 			daddr->bank ^= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 0);
1028*4882a593Smuzhiyun 			daddr->bank ^= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 1);
1029*4882a593Smuzhiyun 			if (dsch.chan_width == 0)
1030*4882a593Smuzhiyun 				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 2);
1031*4882a593Smuzhiyun 			else
1032*4882a593Smuzhiyun 				daddr->bank ^= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 2);
1033*4882a593Smuzhiyun 		}
1034*4882a593Smuzhiyun 	}
1035*4882a593Smuzhiyun 
1036*4882a593Smuzhiyun 	daddr->row = dnv_get_bit(pmiaddr, dmap2[pmiidx].row0 + 6, 0);
1037*4882a593Smuzhiyun 	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row1 + 6, 1);
1038*4882a593Smuzhiyun 	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row2 + 6, 2);
1039*4882a593Smuzhiyun 	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row3 + 6, 3);
1040*4882a593Smuzhiyun 	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row4 + 6, 4);
1041*4882a593Smuzhiyun 	daddr->row |= dnv_get_bit(pmiaddr, dmap2[pmiidx].row5 + 6, 5);
1042*4882a593Smuzhiyun 	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row6 + 6, 6);
1043*4882a593Smuzhiyun 	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row7 + 6, 7);
1044*4882a593Smuzhiyun 	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row8 + 6, 8);
1045*4882a593Smuzhiyun 	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row9 + 6, 9);
1046*4882a593Smuzhiyun 	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row10 + 6, 10);
1047*4882a593Smuzhiyun 	daddr->row |= dnv_get_bit(pmiaddr, dmap3[pmiidx].row11 + 6, 11);
1048*4882a593Smuzhiyun 	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row12 + 6, 12);
1049*4882a593Smuzhiyun 	daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row13 + 6, 13);
1050*4882a593Smuzhiyun 	if (dmap4[pmiidx].row14 != 31)
1051*4882a593Smuzhiyun 		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row14 + 6, 14);
1052*4882a593Smuzhiyun 	if (dmap4[pmiidx].row15 != 31)
1053*4882a593Smuzhiyun 		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row15 + 6, 15);
1054*4882a593Smuzhiyun 	if (dmap4[pmiidx].row16 != 31)
1055*4882a593Smuzhiyun 		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row16 + 6, 16);
1056*4882a593Smuzhiyun 	if (dmap4[pmiidx].row17 != 31)
1057*4882a593Smuzhiyun 		daddr->row |= dnv_get_bit(pmiaddr, dmap4[pmiidx].row17 + 6, 17);
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 	daddr->col = dnv_get_bit(pmiaddr, dmap5[pmiidx].ca3 + 6, 3);
1060*4882a593Smuzhiyun 	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca4 + 6, 4);
1061*4882a593Smuzhiyun 	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca5 + 6, 5);
1062*4882a593Smuzhiyun 	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca6 + 6, 6);
1063*4882a593Smuzhiyun 	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca7 + 6, 7);
1064*4882a593Smuzhiyun 	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca8 + 6, 8);
1065*4882a593Smuzhiyun 	daddr->col |= dnv_get_bit(pmiaddr, dmap5[pmiidx].ca9 + 6, 9);
1066*4882a593Smuzhiyun 	if (!dsch.ddr4en && dmap1[pmiidx].ca11 != 0x3f)
1067*4882a593Smuzhiyun 		daddr->col |= dnv_get_bit(pmiaddr, dmap1[pmiidx].ca11 + 13, 11);
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 	return 0;
1070*4882a593Smuzhiyun }
1071*4882a593Smuzhiyun 
1072*4882a593Smuzhiyun static int check_channel(int ch)
1073*4882a593Smuzhiyun {
1074*4882a593Smuzhiyun 	if (drp0[ch].dramtype != 0) {
1075*4882a593Smuzhiyun 		pnd2_printk(KERN_INFO, "Unsupported DIMM in channel %d\n", ch);
1076*4882a593Smuzhiyun 		return 1;
1077*4882a593Smuzhiyun 	} else if (drp0[ch].eccen == 0) {
1078*4882a593Smuzhiyun 		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1079*4882a593Smuzhiyun 		return 1;
1080*4882a593Smuzhiyun 	}
1081*4882a593Smuzhiyun 	return 0;
1082*4882a593Smuzhiyun }
1083*4882a593Smuzhiyun 
1084*4882a593Smuzhiyun static int apl_check_ecc_active(void)
1085*4882a593Smuzhiyun {
1086*4882a593Smuzhiyun 	int	i, ret = 0;
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	/* Check dramtype and ECC mode for each present DIMM */
1089*4882a593Smuzhiyun 	for (i = 0; i < APL_NUM_CHANNELS; i++)
1090*4882a593Smuzhiyun 		if (chan_mask & BIT(i))
1091*4882a593Smuzhiyun 			ret += check_channel(i);
1092*4882a593Smuzhiyun 	return ret ? -EINVAL : 0;
1093*4882a593Smuzhiyun }
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun #define DIMMS_PRESENT(d) ((d)->rken0 + (d)->rken1 + (d)->rken2 + (d)->rken3)
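/* Non-zero when any of the four rank enable bits (rken0..rken3) is set. */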
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun static int check_unit(int ch)
1098*4882a593Smuzhiyun {
1099*4882a593Smuzhiyun 	struct d_cr_drp *d = &drp[ch];
1100*4882a593Smuzhiyun 
1101*4882a593Smuzhiyun 	if (DIMMS_PRESENT(d) && !ecc_ctrl[ch].eccen) {
1102*4882a593Smuzhiyun 		pnd2_printk(KERN_INFO, "ECC disabled on channel %d\n", ch);
1103*4882a593Smuzhiyun 		return 1;
1104*4882a593Smuzhiyun 	}
1105*4882a593Smuzhiyun 	return 0;
1106*4882a593Smuzhiyun }
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun static int dnv_check_ecc_active(void)
1109*4882a593Smuzhiyun {
1110*4882a593Smuzhiyun 	int	i, ret = 0;
1111*4882a593Smuzhiyun 
1112*4882a593Smuzhiyun 	for (i = 0; i < DNV_NUM_CHANNELS; i++)
1113*4882a593Smuzhiyun 		ret += check_unit(i);
1114*4882a593Smuzhiyun 	return ret ? -EINVAL : 0;
1115*4882a593Smuzhiyun }
1116*4882a593Smuzhiyun 
1117*4882a593Smuzhiyun static int get_memory_error_data(struct mem_ctl_info *mci, u64 addr,
1118*4882a593Smuzhiyun 								 struct dram_addr *daddr, char *msg)
1119*4882a593Smuzhiyun {
1120*4882a593Smuzhiyun 	u64	pmiaddr;
1121*4882a593Smuzhiyun 	u32	pmiidx;
1122*4882a593Smuzhiyun 	int	ret;
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun 	ret = sys2pmi(addr, &pmiidx, &pmiaddr, msg);
1125*4882a593Smuzhiyun 	if (ret)
1126*4882a593Smuzhiyun 		return ret;
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 	pmiaddr >>= ops->pmiaddr_shift;
1129*4882a593Smuzhiyun 	/* pmi channel idx to dimm channel idx */
1130*4882a593Smuzhiyun 	pmiidx >>= ops->pmiidx_shift;
1131*4882a593Smuzhiyun 	daddr->chan = pmiidx;
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun 	ret = ops->pmi2mem(mci, pmiaddr, pmiidx, daddr, msg);
1134*4882a593Smuzhiyun 	if (ret)
1135*4882a593Smuzhiyun 		return ret;
1136*4882a593Smuzhiyun 
1137*4882a593Smuzhiyun 	edac_dbg(0, "SysAddr=%llx PmiAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1138*4882a593Smuzhiyun 			 addr, pmiaddr, daddr->chan, daddr->dimm, daddr->rank, daddr->bank, daddr->row, daddr->col);
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	return 0;
1141*4882a593Smuzhiyun }
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
1144*4882a593Smuzhiyun 				  struct dram_addr *daddr)
1145*4882a593Smuzhiyun {
1146*4882a593Smuzhiyun 	enum hw_event_mc_err_type tp_event;
1147*4882a593Smuzhiyun 	char *optype, msg[PND2_MSG_SIZE];
1148*4882a593Smuzhiyun 	bool ripv = m->mcgstatus & MCG_STATUS_RIPV;
1149*4882a593Smuzhiyun 	bool overflow = m->status & MCI_STATUS_OVER;
1150*4882a593Smuzhiyun 	bool uc_err = m->status & MCI_STATUS_UC;
1151*4882a593Smuzhiyun 	bool recov = m->status & MCI_STATUS_S;
1152*4882a593Smuzhiyun 	u32 core_err_cnt = GET_BITFIELD(m->status, 38, 52);
1153*4882a593Smuzhiyun 	u32 mscod = GET_BITFIELD(m->status, 16, 31);
1154*4882a593Smuzhiyun 	u32 errcode = GET_BITFIELD(m->status, 0, 15);
1155*4882a593Smuzhiyun 	u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1156*4882a593Smuzhiyun 	int rc;
1157*4882a593Smuzhiyun 
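	/*
	 * Severity mapping, as encoded below: an uncorrected error with RIPV
	 * still set is reported as UNCORRECTED (execution can continue), an
	 * uncorrected error without RIPV as FATAL, anything else as CORRECTED.
	 */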
1158*4882a593Smuzhiyun 	tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
1159*4882a593Smuzhiyun 						 HW_EVENT_ERR_CORRECTED;
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 	/*
1162*4882a593Smuzhiyun 	 * According to Table 15-9 of the Intel Architecture spec vol 3A,
1163*4882a593Smuzhiyun 	 * memory errors should fit in this mask:
1164*4882a593Smuzhiyun 	 *	000f 0000 1mmm cccc (binary)
1165*4882a593Smuzhiyun 	 * where:
1166*4882a593Smuzhiyun 	 *	f = Correction Report Filtering Bit. If 1, subsequent errors
1167*4882a593Smuzhiyun 	 *	    won't be shown
1168*4882a593Smuzhiyun 	 *	mmm = error type
1169*4882a593Smuzhiyun 	 *	cccc = channel
1170*4882a593Smuzhiyun 	 * If the mask doesn't match, report an error to the parsing logic
1171*4882a593Smuzhiyun 	 */
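	/*
	 * Example: errcode 0x0090 (f = 0, mmm = 001 "memory read", cccc = 0000)
	 * satisfies (errcode & 0xef80) == 0x80 and decodes below as a
	 * "memory read error".
	 */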
1172*4882a593Smuzhiyun 	if (!((errcode & 0xef80) == 0x80)) {
1173*4882a593Smuzhiyun 		optype = "Can't parse: it is not a memory error";
1174*4882a593Smuzhiyun 	} else {
1175*4882a593Smuzhiyun 		switch (optypenum) {
1176*4882a593Smuzhiyun 		case 0:
1177*4882a593Smuzhiyun 			optype = "generic undef request error";
1178*4882a593Smuzhiyun 			break;
1179*4882a593Smuzhiyun 		case 1:
1180*4882a593Smuzhiyun 			optype = "memory read error";
1181*4882a593Smuzhiyun 			break;
1182*4882a593Smuzhiyun 		case 2:
1183*4882a593Smuzhiyun 			optype = "memory write error";
1184*4882a593Smuzhiyun 			break;
1185*4882a593Smuzhiyun 		case 3:
1186*4882a593Smuzhiyun 			optype = "addr/cmd error";
1187*4882a593Smuzhiyun 			break;
1188*4882a593Smuzhiyun 		case 4:
1189*4882a593Smuzhiyun 			optype = "memory scrubbing error";
1190*4882a593Smuzhiyun 			break;
1191*4882a593Smuzhiyun 		default:
1192*4882a593Smuzhiyun 			optype = "reserved";
1193*4882a593Smuzhiyun 			break;
1194*4882a593Smuzhiyun 		}
1195*4882a593Smuzhiyun 	}
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	/* Only decode errors with a valid address (ADDRV) */
1198*4882a593Smuzhiyun 	if (!(m->status & MCI_STATUS_ADDRV))
1199*4882a593Smuzhiyun 		return;
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	rc = get_memory_error_data(mci, m->addr, daddr, msg);
1202*4882a593Smuzhiyun 	if (rc)
1203*4882a593Smuzhiyun 		goto address_error;
1204*4882a593Smuzhiyun 
1205*4882a593Smuzhiyun 	snprintf(msg, sizeof(msg),
1206*4882a593Smuzhiyun 		 "%s%s err_code:%04x:%04x channel:%d DIMM:%d rank:%d row:%d bank:%d col:%d",
1207*4882a593Smuzhiyun 		 overflow ? " OVERFLOW" : "", (uc_err && recov) ? " recoverable" : "", mscod,
1208*4882a593Smuzhiyun 		 errcode, daddr->chan, daddr->dimm, daddr->rank, daddr->row, daddr->bank, daddr->col);
1209*4882a593Smuzhiyun 
1210*4882a593Smuzhiyun 	edac_dbg(0, "%s\n", msg);
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun 	/* Call the helper to output message */
1213*4882a593Smuzhiyun 	edac_mc_handle_error(tp_event, mci, core_err_cnt, m->addr >> PAGE_SHIFT,
1214*4882a593Smuzhiyun 						 m->addr & ~PAGE_MASK, 0, daddr->chan, daddr->dimm, -1, optype, msg);
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	return;
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun address_error:
1219*4882a593Smuzhiyun 	edac_mc_handle_error(tp_event, mci, core_err_cnt, 0, 0, 0, -1, -1, -1, msg, "");
1220*4882a593Smuzhiyun }
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun static void apl_get_dimm_config(struct mem_ctl_info *mci)
1223*4882a593Smuzhiyun {
1224*4882a593Smuzhiyun 	struct pnd2_pvt	*pvt = mci->pvt_info;
1225*4882a593Smuzhiyun 	struct dimm_info *dimm;
1226*4882a593Smuzhiyun 	struct d_cr_drp0 *d;
1227*4882a593Smuzhiyun 	u64	capacity;
1228*4882a593Smuzhiyun 	int	i, g;
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 	for (i = 0; i < APL_NUM_CHANNELS; i++) {
1231*4882a593Smuzhiyun 		if (!(chan_mask & BIT(i)))
1232*4882a593Smuzhiyun 			continue;
1233*4882a593Smuzhiyun 
1234*4882a593Smuzhiyun 		dimm = edac_get_dimm(mci, i, 0, 0);
1235*4882a593Smuzhiyun 		if (!dimm) {
1236*4882a593Smuzhiyun 			edac_dbg(0, "No allocated DIMM for channel %d\n", i);
1237*4882a593Smuzhiyun 			continue;
1238*4882a593Smuzhiyun 		}
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 		d = &drp0[i];
1241*4882a593Smuzhiyun 		for (g = 0; g < ARRAY_SIZE(dimms); g++)
1242*4882a593Smuzhiyun 			if (dimms[g].addrdec == d->addrdec &&
1243*4882a593Smuzhiyun 			    dimms[g].dden == d->dden &&
1244*4882a593Smuzhiyun 			    dimms[g].dwid == d->dwid)
1245*4882a593Smuzhiyun 				break;
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 		if (g == ARRAY_SIZE(dimms)) {
1248*4882a593Smuzhiyun 			edac_dbg(0, "Channel %d: unrecognized DIMM\n", i);
1249*4882a593Smuzhiyun 			continue;
1250*4882a593Smuzhiyun 		}
1251*4882a593Smuzhiyun 
1252*4882a593Smuzhiyun 		pvt->dimm_geom[i] = g;
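		/*
		 * Capacity appears to be counted in 64-bit (8-byte) words:
		 * enabled ranks times 8 banks times rows times columns. The
		 * ">> (20 - 3)" below therefore converts words to MiB. E.g.
		 * two ranks with rowbits == 15 and colbits == 10 give
		 * 2 * 8 * 32768 * 1024 words, i.e. 4096 MiB.
		 */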
1253*4882a593Smuzhiyun 		capacity = (d->rken0 + d->rken1) * 8 * (1ul << dimms[g].rowbits) *
1254*4882a593Smuzhiyun 				   (1ul << dimms[g].colbits);
1255*4882a593Smuzhiyun 		edac_dbg(0, "Channel %d: %lld MByte DIMM\n", i, capacity >> (20 - 3));
1256*4882a593Smuzhiyun 		dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1257*4882a593Smuzhiyun 		dimm->grain = 32;
1258*4882a593Smuzhiyun 		dimm->dtype = (d->dwid == 0) ? DEV_X8 : DEV_X16;
1259*4882a593Smuzhiyun 		dimm->mtype = MEM_DDR3;
1260*4882a593Smuzhiyun 		dimm->edac_mode = EDAC_SECDED;
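		/*
		 * The label encodes the slice (i / 2) and the channel within
		 * the slice (i % 2); the four APL channels appear to be
		 * arranged two per slice.
		 */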
1261*4882a593Smuzhiyun 		snprintf(dimm->label, sizeof(dimm->label), "Slice#%d_Chan#%d", i / 2, i % 2);
1262*4882a593Smuzhiyun 	}
1263*4882a593Smuzhiyun }
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun static const int dnv_dtypes[] = {
1266*4882a593Smuzhiyun 	DEV_X8, DEV_X4, DEV_X16, DEV_UNKNOWN
1267*4882a593Smuzhiyun };
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun static void dnv_get_dimm_config(struct mem_ctl_info *mci)
1270*4882a593Smuzhiyun {
1271*4882a593Smuzhiyun 	int	i, j, ranks_of_dimm[DNV_MAX_DIMMS], banks, rowbits, colbits, memtype;
1272*4882a593Smuzhiyun 	struct dimm_info *dimm;
1273*4882a593Smuzhiyun 	struct d_cr_drp *d;
1274*4882a593Smuzhiyun 	u64	capacity;
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 	if (dsch.ddr4en) {
1277*4882a593Smuzhiyun 		memtype = MEM_DDR4;
1278*4882a593Smuzhiyun 		banks = 16;
1279*4882a593Smuzhiyun 		colbits = 10;
1280*4882a593Smuzhiyun 	} else {
1281*4882a593Smuzhiyun 		memtype = MEM_DDR3;
1282*4882a593Smuzhiyun 		banks = 8;
1283*4882a593Smuzhiyun 	}
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	for (i = 0; i < DNV_NUM_CHANNELS; i++) {
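		/*
		 * A dmap4 row field of 31 appears to mean "row address bit not
		 * mapped" (dnv_pmi2mem() skips such bits), so the first
		 * unmapped bit gives the row width for this channel.
		 */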
1286*4882a593Smuzhiyun 		if (dmap4[i].row14 == 31)
1287*4882a593Smuzhiyun 			rowbits = 14;
1288*4882a593Smuzhiyun 		else if (dmap4[i].row15 == 31)
1289*4882a593Smuzhiyun 			rowbits = 15;
1290*4882a593Smuzhiyun 		else if (dmap4[i].row16 == 31)
1291*4882a593Smuzhiyun 			rowbits = 16;
1292*4882a593Smuzhiyun 		else if (dmap4[i].row17 == 31)
1293*4882a593Smuzhiyun 			rowbits = 17;
1294*4882a593Smuzhiyun 		else
1295*4882a593Smuzhiyun 			rowbits = 18;
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun 		if (memtype == MEM_DDR3) {
1298*4882a593Smuzhiyun 			if (dmap1[i].ca11 != 0x3f)
1299*4882a593Smuzhiyun 				colbits = 12;
1300*4882a593Smuzhiyun 			else
1301*4882a593Smuzhiyun 				colbits = 10;
1302*4882a593Smuzhiyun 		}
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 		d = &drp[i];
1305*4882a593Smuzhiyun 		/* DIMM0 is present if rank0 and/or rank1 is enabled */
1306*4882a593Smuzhiyun 		ranks_of_dimm[0] = d->rken0 + d->rken1;
1307*4882a593Smuzhiyun 		/* DIMM1 is present if rank2 and/or rank3 is enabled */
1308*4882a593Smuzhiyun 		ranks_of_dimm[1] = d->rken2 + d->rken3;
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 		for (j = 0; j < DNV_MAX_DIMMS; j++) {
1311*4882a593Smuzhiyun 			if (!ranks_of_dimm[j])
1312*4882a593Smuzhiyun 				continue;
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 			dimm = edac_get_dimm(mci, i, j, 0);
1315*4882a593Smuzhiyun 			if (!dimm) {
1316*4882a593Smuzhiyun 				edac_dbg(0, "No allocated DIMM for channel %d DIMM %d\n", i, j);
1317*4882a593Smuzhiyun 				continue;
1318*4882a593Smuzhiyun 			}
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 			capacity = ranks_of_dimm[j] * banks * (1ul << rowbits) * (1ul << colbits);
1321*4882a593Smuzhiyun 			edac_dbg(0, "Channel %d DIMM %d: %lld MByte DIMM\n", i, j, capacity >> (20 - 3));
1322*4882a593Smuzhiyun 			dimm->nr_pages = MiB_TO_PAGES(capacity >> (20 - 3));
1323*4882a593Smuzhiyun 			dimm->grain = 32;
1324*4882a593Smuzhiyun 			dimm->dtype = dnv_dtypes[j ? d->dimmdwid0 : d->dimmdwid1];
1325*4882a593Smuzhiyun 			dimm->mtype = memtype;
1326*4882a593Smuzhiyun 			dimm->edac_mode = EDAC_SECDED;
1327*4882a593Smuzhiyun 			snprintf(dimm->label, sizeof(dimm->label), "Chan#%d_DIMM#%d", i, j);
1328*4882a593Smuzhiyun 		}
1329*4882a593Smuzhiyun 	}
1330*4882a593Smuzhiyun }
1331*4882a593Smuzhiyun 
1332*4882a593Smuzhiyun static int pnd2_register_mci(struct mem_ctl_info **ppmci)
1333*4882a593Smuzhiyun {
1334*4882a593Smuzhiyun 	struct edac_mc_layer layers[2];
1335*4882a593Smuzhiyun 	struct mem_ctl_info *mci;
1336*4882a593Smuzhiyun 	struct pnd2_pvt *pvt;
1337*4882a593Smuzhiyun 	int rc;
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun 	rc = ops->check_ecc();
1340*4882a593Smuzhiyun 	if (rc < 0)
1341*4882a593Smuzhiyun 		return rc;
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	/* Allocate a new MC control structure */
1344*4882a593Smuzhiyun 	layers[0].type = EDAC_MC_LAYER_CHANNEL;
1345*4882a593Smuzhiyun 	layers[0].size = ops->channels;
1346*4882a593Smuzhiyun 	layers[0].is_virt_csrow = false;
1347*4882a593Smuzhiyun 	layers[1].type = EDAC_MC_LAYER_SLOT;
1348*4882a593Smuzhiyun 	layers[1].size = ops->dimms_per_channel;
1349*4882a593Smuzhiyun 	layers[1].is_virt_csrow = true;
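	/*
	 * With the ops tables below this yields 4 channels x 1 DIMM per
	 * channel on APL and 2 channels x 2 DIMMs per channel on DNV.
	 */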
1350*4882a593Smuzhiyun 	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1351*4882a593Smuzhiyun 	if (!mci)
1352*4882a593Smuzhiyun 		return -ENOMEM;
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun 	pvt = mci->pvt_info;
1355*4882a593Smuzhiyun 	memset(pvt, 0, sizeof(*pvt));
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun 	mci->mod_name = EDAC_MOD_STR;
1358*4882a593Smuzhiyun 	mci->dev_name = ops->name;
1359*4882a593Smuzhiyun 	mci->ctl_name = "Pondicherry2";
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun 	/* Get basic DIMM config and the memory layout */
1362*4882a593Smuzhiyun 	ops->get_dimm_config(mci);
1363*4882a593Smuzhiyun 
1364*4882a593Smuzhiyun 	if (edac_mc_add_mc(mci)) {
1365*4882a593Smuzhiyun 		edac_dbg(0, "MC: failed edac_mc_add_mc()\n");
1366*4882a593Smuzhiyun 		edac_mc_free(mci);
1367*4882a593Smuzhiyun 		return -EINVAL;
1368*4882a593Smuzhiyun 	}
1369*4882a593Smuzhiyun 
1370*4882a593Smuzhiyun 	*ppmci = mci;
1371*4882a593Smuzhiyun 
1372*4882a593Smuzhiyun 	return 0;
1373*4882a593Smuzhiyun }
1374*4882a593Smuzhiyun 
1375*4882a593Smuzhiyun static void pnd2_unregister_mci(struct mem_ctl_info *mci)
1376*4882a593Smuzhiyun {
1377*4882a593Smuzhiyun 	if (unlikely(!mci || !mci->pvt_info)) {
1378*4882a593Smuzhiyun 		pnd2_printk(KERN_ERR, "Couldn't find mci handler\n");
1379*4882a593Smuzhiyun 		return;
1380*4882a593Smuzhiyun 	}
1381*4882a593Smuzhiyun 
1382*4882a593Smuzhiyun 	/* Remove MC sysfs nodes */
1383*4882a593Smuzhiyun 	edac_mc_del_mc(NULL);
1384*4882a593Smuzhiyun 	edac_dbg(1, "%s: free mci struct\n", mci->ctl_name);
1385*4882a593Smuzhiyun 	edac_mc_free(mci);
1386*4882a593Smuzhiyun }
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun /*
1389*4882a593Smuzhiyun  * Callback function registered with core kernel mce code.
1390*4882a593Smuzhiyun  * Called once for each logged error.
1391*4882a593Smuzhiyun  */
1392*4882a593Smuzhiyun static int pnd2_mce_check_error(struct notifier_block *nb, unsigned long val, void *data)
1393*4882a593Smuzhiyun {
1394*4882a593Smuzhiyun 	struct mce *mce = (struct mce *)data;
1395*4882a593Smuzhiyun 	struct mem_ctl_info *mci;
1396*4882a593Smuzhiyun 	struct dram_addr daddr;
1397*4882a593Smuzhiyun 	char *type;
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 	mci = pnd2_mci;
1400*4882a593Smuzhiyun 	if (!mci || (mce->kflags & MCE_HANDLED_CEC))
1401*4882a593Smuzhiyun 		return NOTIFY_DONE;
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	/*
1404*4882a593Smuzhiyun 	 * Just let mcelog handle it if the error is
1405*4882a593Smuzhiyun 	 * outside the memory controller. A memory error
1406*4882a593Smuzhiyun 	 * is indicated by bit 7 = 1 and bits 8-11, 13-15 = 0.
1407*4882a593Smuzhiyun 	 * Bit 12 has a special meaning.
1408*4882a593Smuzhiyun 	 */
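	/*
	 * E.g. a status word with low bits 0x9f (like the one forged in
	 * debugfs_u64_set() below) has bit 7 set and bits 8-11, 13-15 clear,
	 * so (status & 0xefff) >> 7 == 1 and decoding proceeds.
	 */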
1409*4882a593Smuzhiyun 	if ((mce->status & 0xefff) >> 7 != 1)
1410*4882a593Smuzhiyun 		return NOTIFY_DONE;
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	if (mce->mcgstatus & MCG_STATUS_MCIP)
1413*4882a593Smuzhiyun 		type = "Exception";
1414*4882a593Smuzhiyun 	else
1415*4882a593Smuzhiyun 		type = "Event";
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 	pnd2_mc_printk(mci, KERN_INFO, "HANDLING MCE MEMORY ERROR\n");
1418*4882a593Smuzhiyun 	pnd2_mc_printk(mci, KERN_INFO, "CPU %u: Machine Check %s: %llx Bank %u: %llx\n",
1419*4882a593Smuzhiyun 				   mce->extcpu, type, mce->mcgstatus, mce->bank, mce->status);
1420*4882a593Smuzhiyun 	pnd2_mc_printk(mci, KERN_INFO, "TSC %llx ", mce->tsc);
1421*4882a593Smuzhiyun 	pnd2_mc_printk(mci, KERN_INFO, "ADDR %llx ", mce->addr);
1422*4882a593Smuzhiyun 	pnd2_mc_printk(mci, KERN_INFO, "MISC %llx ", mce->misc);
1423*4882a593Smuzhiyun 	pnd2_mc_printk(mci, KERN_INFO, "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x\n",
1424*4882a593Smuzhiyun 				   mce->cpuvendor, mce->cpuid, mce->time, mce->socketid, mce->apicid);
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	pnd2_mce_output_error(mci, mce, &daddr);
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 	/* Advise mcelog that the error was handled */
1429*4882a593Smuzhiyun 	mce->kflags |= MCE_HANDLED_EDAC;
1430*4882a593Smuzhiyun 	return NOTIFY_OK;
1431*4882a593Smuzhiyun }
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun static struct notifier_block pnd2_mce_dec = {
1434*4882a593Smuzhiyun 	.notifier_call	= pnd2_mce_check_error,
1435*4882a593Smuzhiyun 	.priority	= MCE_PRIO_EDAC,
1436*4882a593Smuzhiyun };
1437*4882a593Smuzhiyun 
1438*4882a593Smuzhiyun #ifdef CONFIG_EDAC_DEBUG
1439*4882a593Smuzhiyun /*
1440*4882a593Smuzhiyun  * Write an address to this file to exercise the address decode
1441*4882a593Smuzhiyun  * logic in this driver.
1442*4882a593Smuzhiyun  */
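/*
 * Typical usage, assuming debugfs is mounted at /sys/kernel/debug and the
 * EDAC debugfs root is "edac":
 *   echo 0x12345678 > /sys/kernel/debug/edac/pnd2_test/pnd2_debug_addr
 *   cat /sys/kernel/debug/edac/pnd2_test/pnd2_debug_results
 */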
1443*4882a593Smuzhiyun static u64 pnd2_fake_addr;
1444*4882a593Smuzhiyun #define PND2_BLOB_SIZE 1024
1445*4882a593Smuzhiyun static char pnd2_result[PND2_BLOB_SIZE];
1446*4882a593Smuzhiyun static struct dentry *pnd2_test;
1447*4882a593Smuzhiyun static struct debugfs_blob_wrapper pnd2_blob = {
1448*4882a593Smuzhiyun 	.data = pnd2_result,
1449*4882a593Smuzhiyun 	.size = 0
1450*4882a593Smuzhiyun };
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun static int debugfs_u64_set(void *data, u64 val)
1453*4882a593Smuzhiyun {
1454*4882a593Smuzhiyun 	struct dram_addr daddr;
1455*4882a593Smuzhiyun 	struct mce m;
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun 	*(u64 *)data = val;
1458*4882a593Smuzhiyun 	m.mcgstatus = 0;
1459*4882a593Smuzhiyun 	/* ADDRV + MemRd + Unknown channel */
1460*4882a593Smuzhiyun 	m.status = MCI_STATUS_ADDRV + 0x9f;
1461*4882a593Smuzhiyun 	m.addr = val;
1462*4882a593Smuzhiyun 	pnd2_mce_output_error(pnd2_mci, &m, &daddr);
1463*4882a593Smuzhiyun 	snprintf(pnd2_blob.data, PND2_BLOB_SIZE,
1464*4882a593Smuzhiyun 			 "SysAddr=%llx Channel=%d DIMM=%d Rank=%d Bank=%d Row=%d Column=%d\n",
1465*4882a593Smuzhiyun 			 m.addr, daddr.chan, daddr.dimm, daddr.rank, daddr.bank, daddr.row, daddr.col);
1466*4882a593Smuzhiyun 	pnd2_blob.size = strlen(pnd2_blob.data);
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 	return 0;
1469*4882a593Smuzhiyun }
1470*4882a593Smuzhiyun DEFINE_DEBUGFS_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun static void setup_pnd2_debug(void)
1473*4882a593Smuzhiyun {
1474*4882a593Smuzhiyun 	pnd2_test = edac_debugfs_create_dir("pnd2_test");
1475*4882a593Smuzhiyun 	edac_debugfs_create_file("pnd2_debug_addr", 0200, pnd2_test,
1476*4882a593Smuzhiyun 							 &pnd2_fake_addr, &fops_u64_wo);
1477*4882a593Smuzhiyun 	debugfs_create_blob("pnd2_debug_results", 0400, pnd2_test, &pnd2_blob);
1478*4882a593Smuzhiyun }
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun static void teardown_pnd2_debug(void)
1481*4882a593Smuzhiyun {
1482*4882a593Smuzhiyun 	debugfs_remove_recursive(pnd2_test);
1483*4882a593Smuzhiyun }
1484*4882a593Smuzhiyun #else
1485*4882a593Smuzhiyun static void setup_pnd2_debug(void)	{}
1486*4882a593Smuzhiyun static void teardown_pnd2_debug(void)	{}
1487*4882a593Smuzhiyun #endif /* CONFIG_EDAC_DEBUG */
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun 
1490*4882a593Smuzhiyun static int pnd2_probe(void)
1491*4882a593Smuzhiyun {
1492*4882a593Smuzhiyun 	int rc;
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun 	edac_dbg(2, "\n");
1495*4882a593Smuzhiyun 	rc = get_registers();
1496*4882a593Smuzhiyun 	if (rc)
1497*4882a593Smuzhiyun 		return rc;
1498*4882a593Smuzhiyun 
1499*4882a593Smuzhiyun 	return pnd2_register_mci(&pnd2_mci);
1500*4882a593Smuzhiyun }
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun static void pnd2_remove(void)
1503*4882a593Smuzhiyun {
1504*4882a593Smuzhiyun 	edac_dbg(0, "\n");
1505*4882a593Smuzhiyun 	pnd2_unregister_mci(pnd2_mci);
1506*4882a593Smuzhiyun }
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun static struct dunit_ops apl_ops = {
1509*4882a593Smuzhiyun 		.name			= "pnd2/apl",
1510*4882a593Smuzhiyun 		.type			= APL,
1511*4882a593Smuzhiyun 		.pmiaddr_shift		= LOG2_PMI_ADDR_GRANULARITY,
1512*4882a593Smuzhiyun 		.pmiidx_shift		= 0,
1513*4882a593Smuzhiyun 		.channels		= APL_NUM_CHANNELS,
1514*4882a593Smuzhiyun 		.dimms_per_channel	= 1,
1515*4882a593Smuzhiyun 		.rd_reg			= apl_rd_reg,
1516*4882a593Smuzhiyun 		.get_registers		= apl_get_registers,
1517*4882a593Smuzhiyun 		.check_ecc		= apl_check_ecc_active,
1518*4882a593Smuzhiyun 		.mk_region		= apl_mk_region,
1519*4882a593Smuzhiyun 		.get_dimm_config	= apl_get_dimm_config,
1520*4882a593Smuzhiyun 		.pmi2mem		= apl_pmi2mem,
1521*4882a593Smuzhiyun };
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun static struct dunit_ops dnv_ops = {
1524*4882a593Smuzhiyun 		.name			= "pnd2/dnv",
1525*4882a593Smuzhiyun 		.type			= DNV,
1526*4882a593Smuzhiyun 		.pmiaddr_shift		= 0,
1527*4882a593Smuzhiyun 		.pmiidx_shift		= 1,
1528*4882a593Smuzhiyun 		.channels		= DNV_NUM_CHANNELS,
1529*4882a593Smuzhiyun 		.dimms_per_channel	= 2,
1530*4882a593Smuzhiyun 		.rd_reg			= dnv_rd_reg,
1531*4882a593Smuzhiyun 		.get_registers		= dnv_get_registers,
1532*4882a593Smuzhiyun 		.check_ecc		= dnv_check_ecc_active,
1533*4882a593Smuzhiyun 		.mk_region		= dnv_mk_region,
1534*4882a593Smuzhiyun 		.get_dimm_config	= dnv_get_dimm_config,
1535*4882a593Smuzhiyun 		.pmi2mem		= dnv_pmi2mem,
1536*4882a593Smuzhiyun };
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun static const struct x86_cpu_id pnd2_cpuids[] = {
1539*4882a593Smuzhiyun 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT,	&apl_ops),
1540*4882a593Smuzhiyun 	X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D,	&dnv_ops),
1541*4882a593Smuzhiyun 	{ }
1542*4882a593Smuzhiyun };
1543*4882a593Smuzhiyun MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids);
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun static int __init pnd2_init(void)
1546*4882a593Smuzhiyun {
1547*4882a593Smuzhiyun 	const struct x86_cpu_id *id;
1548*4882a593Smuzhiyun 	const char *owner;
1549*4882a593Smuzhiyun 	int rc;
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun 	edac_dbg(2, "\n");
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	owner = edac_get_owner();
1554*4882a593Smuzhiyun 	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
1555*4882a593Smuzhiyun 		return -EBUSY;
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
1558*4882a593Smuzhiyun 		return -ENODEV;
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 	id = x86_match_cpu(pnd2_cpuids);
1561*4882a593Smuzhiyun 	if (!id)
1562*4882a593Smuzhiyun 		return -ENODEV;
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 	ops = (struct dunit_ops *)id->driver_data;
1565*4882a593Smuzhiyun 
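	/*
	 * APL register reads are expected to go through the P2SB bridge on
	 * PCI bus 0; without that bus there is nothing to probe.
	 */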
1566*4882a593Smuzhiyun 	if (ops->type == APL) {
1567*4882a593Smuzhiyun 		p2sb_bus = pci_find_bus(0, 0);
1568*4882a593Smuzhiyun 		if (!p2sb_bus)
1569*4882a593Smuzhiyun 			return -ENODEV;
1570*4882a593Smuzhiyun 	}
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun 	/* Ensure that the OPSTATE is set correctly for POLL or NMI */
1573*4882a593Smuzhiyun 	opstate_init();
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun 	rc = pnd2_probe();
1576*4882a593Smuzhiyun 	if (rc < 0) {
1577*4882a593Smuzhiyun 		pnd2_printk(KERN_ERR, "Failed to register device with error %d.\n", rc);
1578*4882a593Smuzhiyun 		return rc;
1579*4882a593Smuzhiyun 	}
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	if (!pnd2_mci)
1582*4882a593Smuzhiyun 		return -ENODEV;
1583*4882a593Smuzhiyun 
1584*4882a593Smuzhiyun 	mce_register_decode_chain(&pnd2_mce_dec);
1585*4882a593Smuzhiyun 	setup_pnd2_debug();
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 	return 0;
1588*4882a593Smuzhiyun }
1589*4882a593Smuzhiyun 
1590*4882a593Smuzhiyun static void __exit pnd2_exit(void)
1591*4882a593Smuzhiyun {
1592*4882a593Smuzhiyun 	edac_dbg(2, "\n");
1593*4882a593Smuzhiyun 	teardown_pnd2_debug();
1594*4882a593Smuzhiyun 	mce_unregister_decode_chain(&pnd2_mce_dec);
1595*4882a593Smuzhiyun 	pnd2_remove();
1596*4882a593Smuzhiyun }
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun module_init(pnd2_init);
1599*4882a593Smuzhiyun module_exit(pnd2_exit);
1600*4882a593Smuzhiyun 
1601*4882a593Smuzhiyun module_param(edac_op_state, int, 0444);
1602*4882a593Smuzhiyun MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
1605*4882a593Smuzhiyun MODULE_AUTHOR("Tony Luck");
1606*4882a593Smuzhiyun MODULE_DESCRIPTION("MC Driver for Intel SoC using Pondicherry memory controller");
1607