1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * EDAC driver for Intel(R) Xeon(R) Skylake processors
4*4882a593Smuzhiyun * Copyright (c) 2016, Intel Corporation.
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #include <linux/kernel.h>
8*4882a593Smuzhiyun #include <linux/processor.h>
9*4882a593Smuzhiyun #include <asm/cpu_device_id.h>
10*4882a593Smuzhiyun #include <asm/intel-family.h>
11*4882a593Smuzhiyun #include <asm/mce.h>
12*4882a593Smuzhiyun
13*4882a593Smuzhiyun #include "edac_module.h"
14*4882a593Smuzhiyun #include "skx_common.h"
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun #define EDAC_MOD_STR "skx_edac"
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun /*
19*4882a593Smuzhiyun * Debug macros
20*4882a593Smuzhiyun */
21*4882a593Smuzhiyun #define skx_printk(level, fmt, arg...) \
22*4882a593Smuzhiyun edac_printk(level, "skx", fmt, ##arg)
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #define skx_mc_printk(mci, level, fmt, arg...) \
25*4882a593Smuzhiyun edac_mc_chipset_printk(mci, level, "skx", fmt, ##arg)
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun static struct list_head *skx_edac_list;
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun static u64 skx_tolm, skx_tohm;
30*4882a593Smuzhiyun static int skx_num_sockets;
31*4882a593Smuzhiyun static unsigned int nvdimm_count;
32*4882a593Smuzhiyun
33*4882a593Smuzhiyun #define MASK26 0x3FFFFFF /* Mask for 2^26 */
34*4882a593Smuzhiyun #define MASK29 0x1FFFFFFF /* Mask for 2^29 */
35*4882a593Smuzhiyun
/*
 * Find the skx_dev (socket) that owns @bus: match both the PCI segment
 * and the bus number recorded at enumeration index @idx.
 * Returns NULL when no socket claims the bus.
 */
static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx)
{
	struct skx_dev *dev;

	list_for_each_entry(dev, skx_edac_list, list) {
		if (dev->seg == pci_domain_nr(bus) &&
		    dev->bus[idx] == bus->number)
			return dev;
	}

	return NULL;
}
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun enum munittype {
49*4882a593Smuzhiyun CHAN0, CHAN1, CHAN2, SAD_ALL, UTIL_ALL, SAD,
50*4882a593Smuzhiyun ERRCHAN0, ERRCHAN1, ERRCHAN2,
51*4882a593Smuzhiyun };
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun struct munit {
54*4882a593Smuzhiyun u16 did;
55*4882a593Smuzhiyun u16 devfn[SKX_NUM_IMC];
56*4882a593Smuzhiyun u8 busidx;
57*4882a593Smuzhiyun u8 per_socket;
58*4882a593Smuzhiyun enum munittype mtype;
59*4882a593Smuzhiyun };
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun /*
62*4882a593Smuzhiyun * List of PCI device ids that we need together with some device
63*4882a593Smuzhiyun * number and function numbers to tell which memory controller the
64*4882a593Smuzhiyun * device belongs to.
65*4882a593Smuzhiyun */
66*4882a593Smuzhiyun static const struct munit skx_all_munits[] = {
67*4882a593Smuzhiyun { 0x2054, { }, 1, 1, SAD_ALL },
68*4882a593Smuzhiyun { 0x2055, { }, 1, 1, UTIL_ALL },
69*4882a593Smuzhiyun { 0x2040, { PCI_DEVFN(10, 0), PCI_DEVFN(12, 0) }, 2, 2, CHAN0 },
70*4882a593Smuzhiyun { 0x2044, { PCI_DEVFN(10, 4), PCI_DEVFN(12, 4) }, 2, 2, CHAN1 },
71*4882a593Smuzhiyun { 0x2048, { PCI_DEVFN(11, 0), PCI_DEVFN(13, 0) }, 2, 2, CHAN2 },
72*4882a593Smuzhiyun { 0x2043, { PCI_DEVFN(10, 3), PCI_DEVFN(12, 3) }, 2, 2, ERRCHAN0 },
73*4882a593Smuzhiyun { 0x2047, { PCI_DEVFN(10, 7), PCI_DEVFN(12, 7) }, 2, 2, ERRCHAN1 },
74*4882a593Smuzhiyun { 0x204b, { PCI_DEVFN(11, 3), PCI_DEVFN(13, 3) }, 2, 2, ERRCHAN2 },
75*4882a593Smuzhiyun { 0x208e, { }, 1, 0, SAD },
76*4882a593Smuzhiyun { }
77*4882a593Smuzhiyun };
78*4882a593Smuzhiyun
get_all_munits(const struct munit * m)79*4882a593Smuzhiyun static int get_all_munits(const struct munit *m)
80*4882a593Smuzhiyun {
81*4882a593Smuzhiyun struct pci_dev *pdev, *prev;
82*4882a593Smuzhiyun struct skx_dev *d;
83*4882a593Smuzhiyun u32 reg;
84*4882a593Smuzhiyun int i = 0, ndev = 0;
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun prev = NULL;
87*4882a593Smuzhiyun for (;;) {
88*4882a593Smuzhiyun pdev = pci_get_device(PCI_VENDOR_ID_INTEL, m->did, prev);
89*4882a593Smuzhiyun if (!pdev)
90*4882a593Smuzhiyun break;
91*4882a593Smuzhiyun ndev++;
92*4882a593Smuzhiyun if (m->per_socket == SKX_NUM_IMC) {
93*4882a593Smuzhiyun for (i = 0; i < SKX_NUM_IMC; i++)
94*4882a593Smuzhiyun if (m->devfn[i] == pdev->devfn)
95*4882a593Smuzhiyun break;
96*4882a593Smuzhiyun if (i == SKX_NUM_IMC)
97*4882a593Smuzhiyun goto fail;
98*4882a593Smuzhiyun }
99*4882a593Smuzhiyun d = get_skx_dev(pdev->bus, m->busidx);
100*4882a593Smuzhiyun if (!d)
101*4882a593Smuzhiyun goto fail;
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun /* Be sure that the device is enabled */
104*4882a593Smuzhiyun if (unlikely(pci_enable_device(pdev) < 0)) {
105*4882a593Smuzhiyun skx_printk(KERN_ERR, "Couldn't enable device %04x:%04x\n",
106*4882a593Smuzhiyun PCI_VENDOR_ID_INTEL, m->did);
107*4882a593Smuzhiyun goto fail;
108*4882a593Smuzhiyun }
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun switch (m->mtype) {
111*4882a593Smuzhiyun case CHAN0:
112*4882a593Smuzhiyun case CHAN1:
113*4882a593Smuzhiyun case CHAN2:
114*4882a593Smuzhiyun pci_dev_get(pdev);
115*4882a593Smuzhiyun d->imc[i].chan[m->mtype].cdev = pdev;
116*4882a593Smuzhiyun break;
117*4882a593Smuzhiyun case ERRCHAN0:
118*4882a593Smuzhiyun case ERRCHAN1:
119*4882a593Smuzhiyun case ERRCHAN2:
120*4882a593Smuzhiyun pci_dev_get(pdev);
121*4882a593Smuzhiyun d->imc[i].chan[m->mtype - ERRCHAN0].edev = pdev;
122*4882a593Smuzhiyun break;
123*4882a593Smuzhiyun case SAD_ALL:
124*4882a593Smuzhiyun pci_dev_get(pdev);
125*4882a593Smuzhiyun d->sad_all = pdev;
126*4882a593Smuzhiyun break;
127*4882a593Smuzhiyun case UTIL_ALL:
128*4882a593Smuzhiyun pci_dev_get(pdev);
129*4882a593Smuzhiyun d->util_all = pdev;
130*4882a593Smuzhiyun break;
131*4882a593Smuzhiyun case SAD:
132*4882a593Smuzhiyun /*
133*4882a593Smuzhiyun * one of these devices per core, including cores
134*4882a593Smuzhiyun * that don't exist on this SKU. Ignore any that
135*4882a593Smuzhiyun * read a route table of zero, make sure all the
136*4882a593Smuzhiyun * non-zero values match.
137*4882a593Smuzhiyun */
138*4882a593Smuzhiyun pci_read_config_dword(pdev, 0xB4, ®);
139*4882a593Smuzhiyun if (reg != 0) {
140*4882a593Smuzhiyun if (d->mcroute == 0) {
141*4882a593Smuzhiyun d->mcroute = reg;
142*4882a593Smuzhiyun } else if (d->mcroute != reg) {
143*4882a593Smuzhiyun skx_printk(KERN_ERR, "mcroute mismatch\n");
144*4882a593Smuzhiyun goto fail;
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun }
147*4882a593Smuzhiyun ndev--;
148*4882a593Smuzhiyun break;
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun
151*4882a593Smuzhiyun prev = pdev;
152*4882a593Smuzhiyun }
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun return ndev;
155*4882a593Smuzhiyun fail:
156*4882a593Smuzhiyun pci_dev_put(pdev);
157*4882a593Smuzhiyun return -ENODEV;
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun static struct res_config skx_cfg = {
161*4882a593Smuzhiyun .type = SKX,
162*4882a593Smuzhiyun .decs_did = 0x2016,
163*4882a593Smuzhiyun .busno_cfg_offset = 0xcc,
164*4882a593Smuzhiyun };
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun static const struct x86_cpu_id skx_cpuids[] = {
167*4882a593Smuzhiyun X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg),
168*4882a593Smuzhiyun { }
169*4882a593Smuzhiyun };
170*4882a593Smuzhiyun MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
171*4882a593Smuzhiyun
skx_check_ecc(u32 mcmtr)172*4882a593Smuzhiyun static bool skx_check_ecc(u32 mcmtr)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun return !!GET_BITFIELD(mcmtr, 2, 2);
175*4882a593Smuzhiyun }
176*4882a593Smuzhiyun
skx_get_dimm_config(struct mem_ctl_info * mci)177*4882a593Smuzhiyun static int skx_get_dimm_config(struct mem_ctl_info *mci)
178*4882a593Smuzhiyun {
179*4882a593Smuzhiyun struct skx_pvt *pvt = mci->pvt_info;
180*4882a593Smuzhiyun u32 mtr, mcmtr, amap, mcddrtcfg;
181*4882a593Smuzhiyun struct skx_imc *imc = pvt->imc;
182*4882a593Smuzhiyun struct dimm_info *dimm;
183*4882a593Smuzhiyun int i, j;
184*4882a593Smuzhiyun int ndimms;
185*4882a593Smuzhiyun
186*4882a593Smuzhiyun /* Only the mcmtr on the first channel is effective */
187*4882a593Smuzhiyun pci_read_config_dword(imc->chan[0].cdev, 0x87c, &mcmtr);
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun for (i = 0; i < SKX_NUM_CHANNELS; i++) {
190*4882a593Smuzhiyun ndimms = 0;
191*4882a593Smuzhiyun pci_read_config_dword(imc->chan[i].cdev, 0x8C, &amap);
192*4882a593Smuzhiyun pci_read_config_dword(imc->chan[i].cdev, 0x400, &mcddrtcfg);
193*4882a593Smuzhiyun for (j = 0; j < SKX_NUM_DIMMS; j++) {
194*4882a593Smuzhiyun dimm = edac_get_dimm(mci, i, j, 0);
195*4882a593Smuzhiyun pci_read_config_dword(imc->chan[i].cdev,
196*4882a593Smuzhiyun 0x80 + 4 * j, &mtr);
197*4882a593Smuzhiyun if (IS_DIMM_PRESENT(mtr)) {
198*4882a593Smuzhiyun ndimms += skx_get_dimm_info(mtr, mcmtr, amap, dimm, imc, i, j);
199*4882a593Smuzhiyun } else if (IS_NVDIMM_PRESENT(mcddrtcfg, j)) {
200*4882a593Smuzhiyun ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
201*4882a593Smuzhiyun EDAC_MOD_STR);
202*4882a593Smuzhiyun nvdimm_count++;
203*4882a593Smuzhiyun }
204*4882a593Smuzhiyun }
205*4882a593Smuzhiyun if (ndimms && !skx_check_ecc(mcmtr)) {
206*4882a593Smuzhiyun skx_printk(KERN_ERR, "ECC is disabled on imc %d\n", imc->mc);
207*4882a593Smuzhiyun return -ENODEV;
208*4882a593Smuzhiyun }
209*4882a593Smuzhiyun }
210*4882a593Smuzhiyun
211*4882a593Smuzhiyun return 0;
212*4882a593Smuzhiyun }
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun #define SKX_MAX_SAD 24
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun #define SKX_GET_SAD(d, i, reg) \
217*4882a593Smuzhiyun pci_read_config_dword((d)->sad_all, 0x60 + 8 * (i), &(reg))
218*4882a593Smuzhiyun #define SKX_GET_ILV(d, i, reg) \
219*4882a593Smuzhiyun pci_read_config_dword((d)->sad_all, 0x64 + 8 * (i), &(reg))
220*4882a593Smuzhiyun
221*4882a593Smuzhiyun #define SKX_SAD_MOD3MODE(sad) GET_BITFIELD((sad), 30, 31)
222*4882a593Smuzhiyun #define SKX_SAD_MOD3(sad) GET_BITFIELD((sad), 27, 27)
223*4882a593Smuzhiyun #define SKX_SAD_LIMIT(sad) (((u64)GET_BITFIELD((sad), 7, 26) << 26) | MASK26)
224*4882a593Smuzhiyun #define SKX_SAD_MOD3ASMOD2(sad) GET_BITFIELD((sad), 5, 6)
225*4882a593Smuzhiyun #define SKX_SAD_ATTR(sad) GET_BITFIELD((sad), 3, 4)
226*4882a593Smuzhiyun #define SKX_SAD_INTERLEAVE(sad) GET_BITFIELD((sad), 1, 2)
227*4882a593Smuzhiyun #define SKX_SAD_ENABLE(sad) GET_BITFIELD((sad), 0, 0)
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun #define SKX_ILV_REMOTE(tgt) (((tgt) & 8) == 0)
230*4882a593Smuzhiyun #define SKX_ILV_TARGET(tgt) ((tgt) & 7)
231*4882a593Smuzhiyun
skx_show_retry_rd_err_log(struct decoded_addr * res,char * msg,int len)232*4882a593Smuzhiyun static void skx_show_retry_rd_err_log(struct decoded_addr *res,
233*4882a593Smuzhiyun char *msg, int len)
234*4882a593Smuzhiyun {
235*4882a593Smuzhiyun u32 log0, log1, log2, log3, log4;
236*4882a593Smuzhiyun u32 corr0, corr1, corr2, corr3;
237*4882a593Smuzhiyun struct pci_dev *edev;
238*4882a593Smuzhiyun int n;
239*4882a593Smuzhiyun
240*4882a593Smuzhiyun edev = res->dev->imc[res->imc].chan[res->channel].edev;
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun pci_read_config_dword(edev, 0x154, &log0);
243*4882a593Smuzhiyun pci_read_config_dword(edev, 0x148, &log1);
244*4882a593Smuzhiyun pci_read_config_dword(edev, 0x150, &log2);
245*4882a593Smuzhiyun pci_read_config_dword(edev, 0x15c, &log3);
246*4882a593Smuzhiyun pci_read_config_dword(edev, 0x114, &log4);
247*4882a593Smuzhiyun
248*4882a593Smuzhiyun n = snprintf(msg, len, " retry_rd_err_log[%.8x %.8x %.8x %.8x %.8x]",
249*4882a593Smuzhiyun log0, log1, log2, log3, log4);
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun pci_read_config_dword(edev, 0x104, &corr0);
252*4882a593Smuzhiyun pci_read_config_dword(edev, 0x108, &corr1);
253*4882a593Smuzhiyun pci_read_config_dword(edev, 0x10c, &corr2);
254*4882a593Smuzhiyun pci_read_config_dword(edev, 0x110, &corr3);
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun if (len - n > 0)
257*4882a593Smuzhiyun snprintf(msg + n, len - n,
258*4882a593Smuzhiyun " correrrcnt[%.4x %.4x %.4x %.4x %.4x %.4x %.4x %.4x]",
259*4882a593Smuzhiyun corr0 & 0xffff, corr0 >> 16,
260*4882a593Smuzhiyun corr1 & 0xffff, corr1 >> 16,
261*4882a593Smuzhiyun corr2 & 0xffff, corr2 >> 16,
262*4882a593Smuzhiyun corr3 & 0xffff, corr3 >> 16);
263*4882a593Smuzhiyun }
264*4882a593Smuzhiyun
skx_sad_decode(struct decoded_addr * res)265*4882a593Smuzhiyun static bool skx_sad_decode(struct decoded_addr *res)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun struct skx_dev *d = list_first_entry(skx_edac_list, typeof(*d), list);
268*4882a593Smuzhiyun u64 addr = res->addr;
269*4882a593Smuzhiyun int i, idx, tgt, lchan, shift;
270*4882a593Smuzhiyun u32 sad, ilv;
271*4882a593Smuzhiyun u64 limit, prev_limit;
272*4882a593Smuzhiyun int remote = 0;
273*4882a593Smuzhiyun
274*4882a593Smuzhiyun /* Simple sanity check for I/O space or out of range */
275*4882a593Smuzhiyun if (addr >= skx_tohm || (addr >= skx_tolm && addr < BIT_ULL(32))) {
276*4882a593Smuzhiyun edac_dbg(0, "Address 0x%llx out of range\n", addr);
277*4882a593Smuzhiyun return false;
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun restart:
281*4882a593Smuzhiyun prev_limit = 0;
282*4882a593Smuzhiyun for (i = 0; i < SKX_MAX_SAD; i++) {
283*4882a593Smuzhiyun SKX_GET_SAD(d, i, sad);
284*4882a593Smuzhiyun limit = SKX_SAD_LIMIT(sad);
285*4882a593Smuzhiyun if (SKX_SAD_ENABLE(sad)) {
286*4882a593Smuzhiyun if (addr >= prev_limit && addr <= limit)
287*4882a593Smuzhiyun goto sad_found;
288*4882a593Smuzhiyun }
289*4882a593Smuzhiyun prev_limit = limit + 1;
290*4882a593Smuzhiyun }
291*4882a593Smuzhiyun edac_dbg(0, "No SAD entry for 0x%llx\n", addr);
292*4882a593Smuzhiyun return false;
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun sad_found:
295*4882a593Smuzhiyun SKX_GET_ILV(d, i, ilv);
296*4882a593Smuzhiyun
297*4882a593Smuzhiyun switch (SKX_SAD_INTERLEAVE(sad)) {
298*4882a593Smuzhiyun case 0:
299*4882a593Smuzhiyun idx = GET_BITFIELD(addr, 6, 8);
300*4882a593Smuzhiyun break;
301*4882a593Smuzhiyun case 1:
302*4882a593Smuzhiyun idx = GET_BITFIELD(addr, 8, 10);
303*4882a593Smuzhiyun break;
304*4882a593Smuzhiyun case 2:
305*4882a593Smuzhiyun idx = GET_BITFIELD(addr, 12, 14);
306*4882a593Smuzhiyun break;
307*4882a593Smuzhiyun case 3:
308*4882a593Smuzhiyun idx = GET_BITFIELD(addr, 30, 32);
309*4882a593Smuzhiyun break;
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun tgt = GET_BITFIELD(ilv, 4 * idx, 4 * idx + 3);
313*4882a593Smuzhiyun
314*4882a593Smuzhiyun /* If point to another node, find it and start over */
315*4882a593Smuzhiyun if (SKX_ILV_REMOTE(tgt)) {
316*4882a593Smuzhiyun if (remote) {
317*4882a593Smuzhiyun edac_dbg(0, "Double remote!\n");
318*4882a593Smuzhiyun return false;
319*4882a593Smuzhiyun }
320*4882a593Smuzhiyun remote = 1;
321*4882a593Smuzhiyun list_for_each_entry(d, skx_edac_list, list) {
322*4882a593Smuzhiyun if (d->imc[0].src_id == SKX_ILV_TARGET(tgt))
323*4882a593Smuzhiyun goto restart;
324*4882a593Smuzhiyun }
325*4882a593Smuzhiyun edac_dbg(0, "Can't find node %d\n", SKX_ILV_TARGET(tgt));
326*4882a593Smuzhiyun return false;
327*4882a593Smuzhiyun }
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun if (SKX_SAD_MOD3(sad) == 0) {
330*4882a593Smuzhiyun lchan = SKX_ILV_TARGET(tgt);
331*4882a593Smuzhiyun } else {
332*4882a593Smuzhiyun switch (SKX_SAD_MOD3MODE(sad)) {
333*4882a593Smuzhiyun case 0:
334*4882a593Smuzhiyun shift = 6;
335*4882a593Smuzhiyun break;
336*4882a593Smuzhiyun case 1:
337*4882a593Smuzhiyun shift = 8;
338*4882a593Smuzhiyun break;
339*4882a593Smuzhiyun case 2:
340*4882a593Smuzhiyun shift = 12;
341*4882a593Smuzhiyun break;
342*4882a593Smuzhiyun default:
343*4882a593Smuzhiyun edac_dbg(0, "illegal mod3mode\n");
344*4882a593Smuzhiyun return false;
345*4882a593Smuzhiyun }
346*4882a593Smuzhiyun switch (SKX_SAD_MOD3ASMOD2(sad)) {
347*4882a593Smuzhiyun case 0:
348*4882a593Smuzhiyun lchan = (addr >> shift) % 3;
349*4882a593Smuzhiyun break;
350*4882a593Smuzhiyun case 1:
351*4882a593Smuzhiyun lchan = (addr >> shift) % 2;
352*4882a593Smuzhiyun break;
353*4882a593Smuzhiyun case 2:
354*4882a593Smuzhiyun lchan = (addr >> shift) % 2;
355*4882a593Smuzhiyun lchan = (lchan << 1) | !lchan;
356*4882a593Smuzhiyun break;
357*4882a593Smuzhiyun case 3:
358*4882a593Smuzhiyun lchan = ((addr >> shift) % 2) << 1;
359*4882a593Smuzhiyun break;
360*4882a593Smuzhiyun }
361*4882a593Smuzhiyun lchan = (lchan << 1) | (SKX_ILV_TARGET(tgt) & 1);
362*4882a593Smuzhiyun }
363*4882a593Smuzhiyun
364*4882a593Smuzhiyun res->dev = d;
365*4882a593Smuzhiyun res->socket = d->imc[0].src_id;
366*4882a593Smuzhiyun res->imc = GET_BITFIELD(d->mcroute, lchan * 3, lchan * 3 + 2);
367*4882a593Smuzhiyun res->channel = GET_BITFIELD(d->mcroute, lchan * 2 + 18, lchan * 2 + 19);
368*4882a593Smuzhiyun
369*4882a593Smuzhiyun edac_dbg(2, "0x%llx: socket=%d imc=%d channel=%d\n",
370*4882a593Smuzhiyun res->addr, res->socket, res->imc, res->channel);
371*4882a593Smuzhiyun return true;
372*4882a593Smuzhiyun }
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun #define SKX_MAX_TAD 8
375*4882a593Smuzhiyun
376*4882a593Smuzhiyun #define SKX_GET_TADBASE(d, mc, i, reg) \
377*4882a593Smuzhiyun pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x850 + 4 * (i), &(reg))
378*4882a593Smuzhiyun #define SKX_GET_TADWAYNESS(d, mc, i, reg) \
379*4882a593Smuzhiyun pci_read_config_dword((d)->imc[mc].chan[0].cdev, 0x880 + 4 * (i), &(reg))
380*4882a593Smuzhiyun #define SKX_GET_TADCHNILVOFFSET(d, mc, ch, i, reg) \
381*4882a593Smuzhiyun pci_read_config_dword((d)->imc[mc].chan[ch].cdev, 0x90 + 4 * (i), &(reg))
382*4882a593Smuzhiyun
383*4882a593Smuzhiyun #define SKX_TAD_BASE(b) ((u64)GET_BITFIELD((b), 12, 31) << 26)
384*4882a593Smuzhiyun #define SKX_TAD_SKT_GRAN(b) GET_BITFIELD((b), 4, 5)
385*4882a593Smuzhiyun #define SKX_TAD_CHN_GRAN(b) GET_BITFIELD((b), 6, 7)
386*4882a593Smuzhiyun #define SKX_TAD_LIMIT(b) (((u64)GET_BITFIELD((b), 12, 31) << 26) | MASK26)
387*4882a593Smuzhiyun #define SKX_TAD_OFFSET(b) ((u64)GET_BITFIELD((b), 4, 23) << 26)
388*4882a593Smuzhiyun #define SKX_TAD_SKTWAYS(b) (1 << GET_BITFIELD((b), 10, 11))
389*4882a593Smuzhiyun #define SKX_TAD_CHNWAYS(b) (GET_BITFIELD((b), 8, 9) + 1)
390*4882a593Smuzhiyun
391*4882a593Smuzhiyun /* which bit used for both socket and channel interleave */
392*4882a593Smuzhiyun static int skx_granularity[] = { 6, 8, 12, 30 };
393*4882a593Smuzhiyun
/*
 * Undo @ways-way interleaving at bit position @shift: the bits above
 * @shift are divided by the number of ways, then the low @shift bits
 * are re-spliced from @lowbits.
 */
static u64 skx_do_interleave(u64 addr, int shift, int ways, u64 lowbits)
{
	u64 upper = (addr >> shift) / ways;

	return (upper << shift) | (lowbits & ((1ull << shift) - 1));
}
402*4882a593Smuzhiyun
skx_tad_decode(struct decoded_addr * res)403*4882a593Smuzhiyun static bool skx_tad_decode(struct decoded_addr *res)
404*4882a593Smuzhiyun {
405*4882a593Smuzhiyun int i;
406*4882a593Smuzhiyun u32 base, wayness, chnilvoffset;
407*4882a593Smuzhiyun int skt_interleave_bit, chn_interleave_bit;
408*4882a593Smuzhiyun u64 channel_addr;
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun for (i = 0; i < SKX_MAX_TAD; i++) {
411*4882a593Smuzhiyun SKX_GET_TADBASE(res->dev, res->imc, i, base);
412*4882a593Smuzhiyun SKX_GET_TADWAYNESS(res->dev, res->imc, i, wayness);
413*4882a593Smuzhiyun if (SKX_TAD_BASE(base) <= res->addr && res->addr <= SKX_TAD_LIMIT(wayness))
414*4882a593Smuzhiyun goto tad_found;
415*4882a593Smuzhiyun }
416*4882a593Smuzhiyun edac_dbg(0, "No TAD entry for 0x%llx\n", res->addr);
417*4882a593Smuzhiyun return false;
418*4882a593Smuzhiyun
419*4882a593Smuzhiyun tad_found:
420*4882a593Smuzhiyun res->sktways = SKX_TAD_SKTWAYS(wayness);
421*4882a593Smuzhiyun res->chanways = SKX_TAD_CHNWAYS(wayness);
422*4882a593Smuzhiyun skt_interleave_bit = skx_granularity[SKX_TAD_SKT_GRAN(base)];
423*4882a593Smuzhiyun chn_interleave_bit = skx_granularity[SKX_TAD_CHN_GRAN(base)];
424*4882a593Smuzhiyun
425*4882a593Smuzhiyun SKX_GET_TADCHNILVOFFSET(res->dev, res->imc, res->channel, i, chnilvoffset);
426*4882a593Smuzhiyun channel_addr = res->addr - SKX_TAD_OFFSET(chnilvoffset);
427*4882a593Smuzhiyun
428*4882a593Smuzhiyun if (res->chanways == 3 && skt_interleave_bit > chn_interleave_bit) {
429*4882a593Smuzhiyun /* Must handle channel first, then socket */
430*4882a593Smuzhiyun channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
431*4882a593Smuzhiyun res->chanways, channel_addr);
432*4882a593Smuzhiyun channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
433*4882a593Smuzhiyun res->sktways, channel_addr);
434*4882a593Smuzhiyun } else {
435*4882a593Smuzhiyun /* Handle socket then channel. Preserve low bits from original address */
436*4882a593Smuzhiyun channel_addr = skx_do_interleave(channel_addr, skt_interleave_bit,
437*4882a593Smuzhiyun res->sktways, res->addr);
438*4882a593Smuzhiyun channel_addr = skx_do_interleave(channel_addr, chn_interleave_bit,
439*4882a593Smuzhiyun res->chanways, res->addr);
440*4882a593Smuzhiyun }
441*4882a593Smuzhiyun
442*4882a593Smuzhiyun res->chan_addr = channel_addr;
443*4882a593Smuzhiyun
444*4882a593Smuzhiyun edac_dbg(2, "0x%llx: chan_addr=0x%llx sktways=%d chanways=%d\n",
445*4882a593Smuzhiyun res->addr, res->chan_addr, res->sktways, res->chanways);
446*4882a593Smuzhiyun return true;
447*4882a593Smuzhiyun }
448*4882a593Smuzhiyun
449*4882a593Smuzhiyun #define SKX_MAX_RIR 4
450*4882a593Smuzhiyun
451*4882a593Smuzhiyun #define SKX_GET_RIRWAYNESS(d, mc, ch, i, reg) \
452*4882a593Smuzhiyun pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
453*4882a593Smuzhiyun 0x108 + 4 * (i), &(reg))
454*4882a593Smuzhiyun #define SKX_GET_RIRILV(d, mc, ch, idx, i, reg) \
455*4882a593Smuzhiyun pci_read_config_dword((d)->imc[mc].chan[ch].cdev, \
456*4882a593Smuzhiyun 0x120 + 16 * (idx) + 4 * (i), &(reg))
457*4882a593Smuzhiyun
458*4882a593Smuzhiyun #define SKX_RIR_VALID(b) GET_BITFIELD((b), 31, 31)
459*4882a593Smuzhiyun #define SKX_RIR_LIMIT(b) (((u64)GET_BITFIELD((b), 1, 11) << 29) | MASK29)
460*4882a593Smuzhiyun #define SKX_RIR_WAYS(b) (1 << GET_BITFIELD((b), 28, 29))
461*4882a593Smuzhiyun #define SKX_RIR_CHAN_RANK(b) GET_BITFIELD((b), 16, 19)
462*4882a593Smuzhiyun #define SKX_RIR_OFFSET(b) ((u64)(GET_BITFIELD((b), 2, 15) << 26))
463*4882a593Smuzhiyun
skx_rir_decode(struct decoded_addr * res)464*4882a593Smuzhiyun static bool skx_rir_decode(struct decoded_addr *res)
465*4882a593Smuzhiyun {
466*4882a593Smuzhiyun int i, idx, chan_rank;
467*4882a593Smuzhiyun int shift;
468*4882a593Smuzhiyun u32 rirway, rirlv;
469*4882a593Smuzhiyun u64 rank_addr, prev_limit = 0, limit;
470*4882a593Smuzhiyun
471*4882a593Smuzhiyun if (res->dev->imc[res->imc].chan[res->channel].dimms[0].close_pg)
472*4882a593Smuzhiyun shift = 6;
473*4882a593Smuzhiyun else
474*4882a593Smuzhiyun shift = 13;
475*4882a593Smuzhiyun
476*4882a593Smuzhiyun for (i = 0; i < SKX_MAX_RIR; i++) {
477*4882a593Smuzhiyun SKX_GET_RIRWAYNESS(res->dev, res->imc, res->channel, i, rirway);
478*4882a593Smuzhiyun limit = SKX_RIR_LIMIT(rirway);
479*4882a593Smuzhiyun if (SKX_RIR_VALID(rirway)) {
480*4882a593Smuzhiyun if (prev_limit <= res->chan_addr &&
481*4882a593Smuzhiyun res->chan_addr <= limit)
482*4882a593Smuzhiyun goto rir_found;
483*4882a593Smuzhiyun }
484*4882a593Smuzhiyun prev_limit = limit;
485*4882a593Smuzhiyun }
486*4882a593Smuzhiyun edac_dbg(0, "No RIR entry for 0x%llx\n", res->addr);
487*4882a593Smuzhiyun return false;
488*4882a593Smuzhiyun
489*4882a593Smuzhiyun rir_found:
490*4882a593Smuzhiyun rank_addr = res->chan_addr >> shift;
491*4882a593Smuzhiyun rank_addr /= SKX_RIR_WAYS(rirway);
492*4882a593Smuzhiyun rank_addr <<= shift;
493*4882a593Smuzhiyun rank_addr |= res->chan_addr & GENMASK_ULL(shift - 1, 0);
494*4882a593Smuzhiyun
495*4882a593Smuzhiyun res->rank_address = rank_addr;
496*4882a593Smuzhiyun idx = (res->chan_addr >> shift) % SKX_RIR_WAYS(rirway);
497*4882a593Smuzhiyun
498*4882a593Smuzhiyun SKX_GET_RIRILV(res->dev, res->imc, res->channel, idx, i, rirlv);
499*4882a593Smuzhiyun res->rank_address = rank_addr - SKX_RIR_OFFSET(rirlv);
500*4882a593Smuzhiyun chan_rank = SKX_RIR_CHAN_RANK(rirlv);
501*4882a593Smuzhiyun res->channel_rank = chan_rank;
502*4882a593Smuzhiyun res->dimm = chan_rank / 4;
503*4882a593Smuzhiyun res->rank = chan_rank % 4;
504*4882a593Smuzhiyun
505*4882a593Smuzhiyun edac_dbg(2, "0x%llx: dimm=%d rank=%d chan_rank=%d rank_addr=0x%llx\n",
506*4882a593Smuzhiyun res->addr, res->dimm, res->rank,
507*4882a593Smuzhiyun res->channel_rank, res->rank_address);
508*4882a593Smuzhiyun return true;
509*4882a593Smuzhiyun }
510*4882a593Smuzhiyun
511*4882a593Smuzhiyun static u8 skx_close_row[] = {
512*4882a593Smuzhiyun 15, 16, 17, 18, 20, 21, 22, 28, 10, 11, 12, 13, 29, 30, 31, 32, 33
513*4882a593Smuzhiyun };
514*4882a593Smuzhiyun
515*4882a593Smuzhiyun static u8 skx_close_column[] = {
516*4882a593Smuzhiyun 3, 4, 5, 14, 19, 23, 24, 25, 26, 27
517*4882a593Smuzhiyun };
518*4882a593Smuzhiyun
519*4882a593Smuzhiyun static u8 skx_open_row[] = {
520*4882a593Smuzhiyun 14, 15, 16, 20, 28, 21, 22, 23, 24, 25, 26, 27, 29, 30, 31, 32, 33
521*4882a593Smuzhiyun };
522*4882a593Smuzhiyun
523*4882a593Smuzhiyun static u8 skx_open_column[] = {
524*4882a593Smuzhiyun 3, 4, 5, 6, 7, 8, 9, 10, 11, 12
525*4882a593Smuzhiyun };
526*4882a593Smuzhiyun
527*4882a593Smuzhiyun static u8 skx_open_fine_column[] = {
528*4882a593Smuzhiyun 3, 4, 5, 7, 8, 9, 10, 11, 12, 13
529*4882a593Smuzhiyun };
530*4882a593Smuzhiyun
/*
 * Gather @nbits scattered bits of @addr — positions listed in @bits —
 * into one packed value (bits[k] supplies result bit k).
 */
static int skx_bits(u64 addr, int nbits, u8 *bits)
{
	int i, val = 0;

	for (i = 0; i < nbits; i++)
		val |= ((addr >> bits[i]) & 1) << i;

	return val;
}
539*4882a593Smuzhiyun
/*
 * Build a 2-bit bank value from address bits @b0/@b1, optionally
 * XOR-folded with bits @x0/@x1 when bank XOR is enabled.
 */
static int skx_bank_bits(u64 addr, int b0, int b1, int do_xor, int x0, int x1)
{
	int bank = GET_BITFIELD(addr, b0, b0) | (GET_BITFIELD(addr, b1, b1) << 1);

	if (do_xor)
		bank ^= GET_BITFIELD(addr, x0, x0) | (GET_BITFIELD(addr, x1, x1) << 1);

	return bank;
}
549*4882a593Smuzhiyun
skx_mad_decode(struct decoded_addr * r)550*4882a593Smuzhiyun static bool skx_mad_decode(struct decoded_addr *r)
551*4882a593Smuzhiyun {
552*4882a593Smuzhiyun struct skx_dimm *dimm = &r->dev->imc[r->imc].chan[r->channel].dimms[r->dimm];
553*4882a593Smuzhiyun int bg0 = dimm->fine_grain_bank ? 6 : 13;
554*4882a593Smuzhiyun
555*4882a593Smuzhiyun if (dimm->close_pg) {
556*4882a593Smuzhiyun r->row = skx_bits(r->rank_address, dimm->rowbits, skx_close_row);
557*4882a593Smuzhiyun r->column = skx_bits(r->rank_address, dimm->colbits, skx_close_column);
558*4882a593Smuzhiyun r->column |= 0x400; /* C10 is autoprecharge, always set */
559*4882a593Smuzhiyun r->bank_address = skx_bank_bits(r->rank_address, 8, 9, dimm->bank_xor_enable, 22, 28);
560*4882a593Smuzhiyun r->bank_group = skx_bank_bits(r->rank_address, 6, 7, dimm->bank_xor_enable, 20, 21);
561*4882a593Smuzhiyun } else {
562*4882a593Smuzhiyun r->row = skx_bits(r->rank_address, dimm->rowbits, skx_open_row);
563*4882a593Smuzhiyun if (dimm->fine_grain_bank)
564*4882a593Smuzhiyun r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_fine_column);
565*4882a593Smuzhiyun else
566*4882a593Smuzhiyun r->column = skx_bits(r->rank_address, dimm->colbits, skx_open_column);
567*4882a593Smuzhiyun r->bank_address = skx_bank_bits(r->rank_address, 18, 19, dimm->bank_xor_enable, 22, 23);
568*4882a593Smuzhiyun r->bank_group = skx_bank_bits(r->rank_address, bg0, 17, dimm->bank_xor_enable, 20, 21);
569*4882a593Smuzhiyun }
570*4882a593Smuzhiyun r->row &= (1u << dimm->rowbits) - 1;
571*4882a593Smuzhiyun
572*4882a593Smuzhiyun edac_dbg(2, "0x%llx: row=0x%x col=0x%x bank_addr=%d bank_group=%d\n",
573*4882a593Smuzhiyun r->addr, r->row, r->column, r->bank_address,
574*4882a593Smuzhiyun r->bank_group);
575*4882a593Smuzhiyun return true;
576*4882a593Smuzhiyun }
577*4882a593Smuzhiyun
skx_decode(struct decoded_addr * res)578*4882a593Smuzhiyun static bool skx_decode(struct decoded_addr *res)
579*4882a593Smuzhiyun {
580*4882a593Smuzhiyun return skx_sad_decode(res) && skx_tad_decode(res) &&
581*4882a593Smuzhiyun skx_rir_decode(res) && skx_mad_decode(res);
582*4882a593Smuzhiyun }
583*4882a593Smuzhiyun
584*4882a593Smuzhiyun static struct notifier_block skx_mce_dec = {
585*4882a593Smuzhiyun .notifier_call = skx_mce_check_error,
586*4882a593Smuzhiyun .priority = MCE_PRIO_EDAC,
587*4882a593Smuzhiyun };
588*4882a593Smuzhiyun
#ifdef CONFIG_EDAC_DEBUG
/*
 * Debug feature.
 * Exercise the address decode logic by writing an address to
 * /sys/kernel/debug/edac/skx_test/addr.
 */
static struct dentry *skx_test;

/* Inject a fake corrected-read MCE at address @val into the decoder. */
static int debugfs_u64_set(void *data, u64 val)
{
	struct mce m;

	pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);

	memset(&m, 0, sizeof(m));
	/* ADDRV + MemRd + Unknown channel */
	m.status = MCI_STATUS_ADDRV + 0x90;
	/* One corrected error */
	m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
	m.addr = val;
	skx_mce_check_error(NULL, 0, &m);

	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");

/* Create the write-only skx_test/addr injection node. */
static void setup_skx_debug(void)
{
	skx_test = edac_debugfs_create_dir("skx_test");
	if (!skx_test)
		return;

	if (!edac_debugfs_create_file("addr", 0200, skx_test,
				      NULL, &fops_u64_wo)) {
		debugfs_remove(skx_test);
		skx_test = NULL;
	}
}

/* Remove the debugfs tree; safe even if creation failed (NULL dentry). */
static void teardown_skx_debug(void)
{
	debugfs_remove_recursive(skx_test);
}
#else
static inline void setup_skx_debug(void) {}
static inline void teardown_skx_debug(void) {}
#endif /*CONFIG_EDAC_DEBUG*/
636*4882a593Smuzhiyun
637*4882a593Smuzhiyun /*
638*4882a593Smuzhiyun * skx_init:
639*4882a593Smuzhiyun * make sure we are running on the correct cpu model
640*4882a593Smuzhiyun * search for all the devices we need
641*4882a593Smuzhiyun * check which DIMMs are present.
642*4882a593Smuzhiyun */
skx_init(void)643*4882a593Smuzhiyun static int __init skx_init(void)
644*4882a593Smuzhiyun {
645*4882a593Smuzhiyun const struct x86_cpu_id *id;
646*4882a593Smuzhiyun struct res_config *cfg;
647*4882a593Smuzhiyun const struct munit *m;
648*4882a593Smuzhiyun const char *owner;
649*4882a593Smuzhiyun int rc = 0, i, off[3] = {0xd0, 0xd4, 0xd8};
650*4882a593Smuzhiyun u8 mc = 0, src_id, node_id;
651*4882a593Smuzhiyun struct skx_dev *d;
652*4882a593Smuzhiyun
653*4882a593Smuzhiyun edac_dbg(2, "\n");
654*4882a593Smuzhiyun
655*4882a593Smuzhiyun owner = edac_get_owner();
656*4882a593Smuzhiyun if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
657*4882a593Smuzhiyun return -EBUSY;
658*4882a593Smuzhiyun
659*4882a593Smuzhiyun if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
660*4882a593Smuzhiyun return -ENODEV;
661*4882a593Smuzhiyun
662*4882a593Smuzhiyun id = x86_match_cpu(skx_cpuids);
663*4882a593Smuzhiyun if (!id)
664*4882a593Smuzhiyun return -ENODEV;
665*4882a593Smuzhiyun
666*4882a593Smuzhiyun cfg = (struct res_config *)id->driver_data;
667*4882a593Smuzhiyun
668*4882a593Smuzhiyun rc = skx_get_hi_lo(0x2034, off, &skx_tolm, &skx_tohm);
669*4882a593Smuzhiyun if (rc)
670*4882a593Smuzhiyun return rc;
671*4882a593Smuzhiyun
672*4882a593Smuzhiyun rc = skx_get_all_bus_mappings(cfg, &skx_edac_list);
673*4882a593Smuzhiyun if (rc < 0)
674*4882a593Smuzhiyun goto fail;
675*4882a593Smuzhiyun if (rc == 0) {
676*4882a593Smuzhiyun edac_dbg(2, "No memory controllers found\n");
677*4882a593Smuzhiyun return -ENODEV;
678*4882a593Smuzhiyun }
679*4882a593Smuzhiyun skx_num_sockets = rc;
680*4882a593Smuzhiyun
681*4882a593Smuzhiyun for (m = skx_all_munits; m->did; m++) {
682*4882a593Smuzhiyun rc = get_all_munits(m);
683*4882a593Smuzhiyun if (rc < 0)
684*4882a593Smuzhiyun goto fail;
685*4882a593Smuzhiyun if (rc != m->per_socket * skx_num_sockets) {
686*4882a593Smuzhiyun edac_dbg(2, "Expected %d, got %d of 0x%x\n",
687*4882a593Smuzhiyun m->per_socket * skx_num_sockets, rc, m->did);
688*4882a593Smuzhiyun rc = -ENODEV;
689*4882a593Smuzhiyun goto fail;
690*4882a593Smuzhiyun }
691*4882a593Smuzhiyun }
692*4882a593Smuzhiyun
693*4882a593Smuzhiyun list_for_each_entry(d, skx_edac_list, list) {
694*4882a593Smuzhiyun rc = skx_get_src_id(d, 0xf0, &src_id);
695*4882a593Smuzhiyun if (rc < 0)
696*4882a593Smuzhiyun goto fail;
697*4882a593Smuzhiyun rc = skx_get_node_id(d, &node_id);
698*4882a593Smuzhiyun if (rc < 0)
699*4882a593Smuzhiyun goto fail;
700*4882a593Smuzhiyun edac_dbg(2, "src_id=%d node_id=%d\n", src_id, node_id);
701*4882a593Smuzhiyun for (i = 0; i < SKX_NUM_IMC; i++) {
702*4882a593Smuzhiyun d->imc[i].mc = mc++;
703*4882a593Smuzhiyun d->imc[i].lmc = i;
704*4882a593Smuzhiyun d->imc[i].src_id = src_id;
705*4882a593Smuzhiyun d->imc[i].node_id = node_id;
706*4882a593Smuzhiyun rc = skx_register_mci(&d->imc[i], d->imc[i].chan[0].cdev,
707*4882a593Smuzhiyun "Skylake Socket", EDAC_MOD_STR,
708*4882a593Smuzhiyun skx_get_dimm_config);
709*4882a593Smuzhiyun if (rc < 0)
710*4882a593Smuzhiyun goto fail;
711*4882a593Smuzhiyun }
712*4882a593Smuzhiyun }
713*4882a593Smuzhiyun
714*4882a593Smuzhiyun skx_set_decode(skx_decode, skx_show_retry_rd_err_log);
715*4882a593Smuzhiyun
716*4882a593Smuzhiyun if (nvdimm_count && skx_adxl_get() == -ENODEV)
717*4882a593Smuzhiyun skx_printk(KERN_NOTICE, "Only decoding DDR4 address!\n");
718*4882a593Smuzhiyun
719*4882a593Smuzhiyun /* Ensure that the OPSTATE is set correctly for POLL or NMI */
720*4882a593Smuzhiyun opstate_init();
721*4882a593Smuzhiyun
722*4882a593Smuzhiyun setup_skx_debug();
723*4882a593Smuzhiyun
724*4882a593Smuzhiyun mce_register_decode_chain(&skx_mce_dec);
725*4882a593Smuzhiyun
726*4882a593Smuzhiyun return 0;
727*4882a593Smuzhiyun fail:
728*4882a593Smuzhiyun skx_remove();
729*4882a593Smuzhiyun return rc;
730*4882a593Smuzhiyun }
731*4882a593Smuzhiyun
skx_exit(void)732*4882a593Smuzhiyun static void __exit skx_exit(void)
733*4882a593Smuzhiyun {
734*4882a593Smuzhiyun edac_dbg(2, "\n");
735*4882a593Smuzhiyun mce_unregister_decode_chain(&skx_mce_dec);
736*4882a593Smuzhiyun teardown_skx_debug();
737*4882a593Smuzhiyun if (nvdimm_count)
738*4882a593Smuzhiyun skx_adxl_put();
739*4882a593Smuzhiyun skx_remove();
740*4882a593Smuzhiyun }
741*4882a593Smuzhiyun
742*4882a593Smuzhiyun module_init(skx_init);
743*4882a593Smuzhiyun module_exit(skx_exit);
744*4882a593Smuzhiyun
745*4882a593Smuzhiyun module_param(edac_op_state, int, 0444);
746*4882a593Smuzhiyun MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
747*4882a593Smuzhiyun
748*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
749*4882a593Smuzhiyun MODULE_AUTHOR("Tony Luck");
750*4882a593Smuzhiyun MODULE_DESCRIPTION("MC Driver for Intel Skylake server processors");
751