xref: /OK3568_Linux_fs/kernel/drivers/edac/i10nm_base.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Driver for Intel(R) 10nm server memory controller.
4*4882a593Smuzhiyun  * Copyright (c) 2019, Intel Corporation.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/kernel.h>
9*4882a593Smuzhiyun #include <linux/io.h>
10*4882a593Smuzhiyun #include <asm/cpu_device_id.h>
11*4882a593Smuzhiyun #include <asm/intel-family.h>
12*4882a593Smuzhiyun #include <asm/mce.h>
13*4882a593Smuzhiyun #include "edac_module.h"
14*4882a593Smuzhiyun #include "skx_common.h"
15*4882a593Smuzhiyun 
#define I10NM_REVISION	"v0.0.3"
#define EDAC_MOD_STR	"i10nm_edac"

/* Debug macros */
#define i10nm_printk(level, fmt, arg...)	\
	edac_printk(level, "i10nm", fmt, ##arg)

/* Read the socket/per-IMC BAR registers from the URACU's PCI config space */
#define I10NM_GET_SCK_BAR(d, reg)	\
	pci_read_config_dword((d)->uracu, 0xd0, &(reg))
#define I10NM_GET_IMC_BAR(d, i, reg)	\
	pci_read_config_dword((d)->uracu, 0xd8 + (i) * 4, &(reg))
/* Per-channel registers inside an IMC's ioremapped MMIO window (0x4000 stride) */
#define I10NM_GET_DIMMMTR(m, i, j)	\
	readl((m)->mbase + 0x2080c + (i) * 0x4000 + (j) * 4)
#define I10NM_GET_MCDDRTCFG(m, i)	\
	readl((m)->mbase + 0x20970 + (i) * 0x4000)
#define I10NM_GET_MCMTR(m, i)		\
	readl((m)->mbase + 0x20ef8 + (i) * 0x4000)

/* Decode socket MMIO base and per-IMC window offset/size from BAR values */
#define I10NM_GET_SCK_MMIO_BASE(reg)	(GET_BITFIELD(reg, 0, 28) << 23)
#define I10NM_GET_IMC_MMIO_OFFSET(reg)	(GET_BITFIELD(reg, 0, 10) << 12)
#define I10NM_GET_IMC_MMIO_SIZE(reg)	((GET_BITFIELD(reg, 13, 23) - \
					 GET_BITFIELD(reg, 0, 10) + 1) << 12)
38*4882a593Smuzhiyun 
/* Per-socket skx_dev list, populated by skx_get_all_bus_mappings() at init */
static struct list_head *i10nm_edac_list;
40*4882a593Smuzhiyun 
pci_get_dev_wrapper(int dom,unsigned int bus,unsigned int dev,unsigned int fun)41*4882a593Smuzhiyun static struct pci_dev *pci_get_dev_wrapper(int dom, unsigned int bus,
42*4882a593Smuzhiyun 					   unsigned int dev, unsigned int fun)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun 	struct pci_dev *pdev;
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun 	pdev = pci_get_domain_bus_and_slot(dom, bus, PCI_DEVFN(dev, fun));
47*4882a593Smuzhiyun 	if (!pdev) {
48*4882a593Smuzhiyun 		edac_dbg(2, "No device %02x:%02x.%x\n",
49*4882a593Smuzhiyun 			 bus, dev, fun);
50*4882a593Smuzhiyun 		return NULL;
51*4882a593Smuzhiyun 	}
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 	if (unlikely(pci_enable_device(pdev) < 0)) {
54*4882a593Smuzhiyun 		edac_dbg(2, "Failed to enable device %02x:%02x.%x\n",
55*4882a593Smuzhiyun 			 bus, dev, fun);
56*4882a593Smuzhiyun 		return NULL;
57*4882a593Smuzhiyun 	}
58*4882a593Smuzhiyun 
59*4882a593Smuzhiyun 	pci_dev_get(pdev);
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 	return pdev;
62*4882a593Smuzhiyun }
63*4882a593Smuzhiyun 
/*
 * Discover and map the memory-controller units for every socket on
 * i10nm_edac_list: find the UTIL_ALL and URACU PCI devices, read the
 * socket MMIO base from the URACU, then locate each IMC's PCI device
 * and ioremap its register window into d->imc[i].mbase.
 *
 * Returns 0 on success or -ENODEV on failure.  Resources acquired
 * before a failure are left in place for the caller to release (the
 * caller invokes skx_remove() on error).
 */
static int i10nm_get_all_munits(void)
{
	struct pci_dev *mdev;
	void __iomem *mbase;
	unsigned long size;
	struct skx_dev *d;
	int i, j = 0;
	u32 reg, off;
	u64 base;

	list_for_each_entry(d, i10nm_edac_list, list) {
		/* UTIL_ALL unit: device 29, function 1 on the socket's second bus */
		d->util_all = pci_get_dev_wrapper(d->seg, d->bus[1], 29, 1);
		if (!d->util_all)
			return -ENODEV;

		/* URACU unit (holds the MMIO BARs): device 0, function 1 on bus[0] */
		d->uracu = pci_get_dev_wrapper(d->seg, d->bus[0], 0, 1);
		if (!d->uracu)
			return -ENODEV;

		if (I10NM_GET_SCK_BAR(d, reg)) {
			i10nm_printk(KERN_ERR, "Failed to socket bar\n");
			return -ENODEV;
		}

		base = I10NM_GET_SCK_MMIO_BASE(reg);
		edac_dbg(2, "socket%d mmio base 0x%llx (reg 0x%x)\n",
			 j++, base, reg);

		for (i = 0; i < I10NM_NUM_IMC; i++) {
			/* IMC i is PCI device 12 + i, function 0 */
			mdev = pci_get_dev_wrapper(d->seg, d->bus[0],
						   12 + i, 0);
			/* At least the first IMC must be present */
			if (i == 0 && !mdev) {
				i10nm_printk(KERN_ERR, "No IMC found\n");
				return -ENODEV;
			}
			if (!mdev)
				continue;

			d->imc[i].mdev = mdev;

			if (I10NM_GET_IMC_BAR(d, i, reg)) {
				i10nm_printk(KERN_ERR, "Failed to get mc bar\n");
				return -ENODEV;
			}

			/* Per-IMC window: offset and size within the socket BAR */
			off  = I10NM_GET_IMC_MMIO_OFFSET(reg);
			size = I10NM_GET_IMC_MMIO_SIZE(reg);
			edac_dbg(2, "mc%d mmio base 0x%llx size 0x%lx (reg 0x%x)\n",
				 i, base + off, size, reg);

			mbase = ioremap(base + off, size);
			if (!mbase) {
				i10nm_printk(KERN_ERR, "Failed to ioremap 0x%llx\n",
					     base + off);
				return -ENODEV;
			}

			d->imc[i].mbase = mbase;
		}
	}

	return 0;
}
127*4882a593Smuzhiyun 
/*
 * Stepping-dependent resource configuration: cfg0 (early steppings)
 * reads the bus numbers from config offset 0xcc, cfg1 (later
 * steppings) from 0xd0.  Both use decoder device ID 0x3452.
 */
static struct res_config i10nm_cfg0 = {
	.type			= I10NM,
	.decs_did		= 0x3452,
	.busno_cfg_offset	= 0xcc,
};

static struct res_config i10nm_cfg1 = {
	.type			= I10NM,
	.decs_did		= 0x3452,
	.busno_cfg_offset	= 0xd0,
};

/* Supported CPU models/steppings, each paired with its res_config */
static const struct x86_cpu_id i10nm_cpuids[] = {
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X,		X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X,		X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D,		X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
149*4882a593Smuzhiyun 
i10nm_check_ecc(struct skx_imc * imc,int chan)150*4882a593Smuzhiyun static bool i10nm_check_ecc(struct skx_imc *imc, int chan)
151*4882a593Smuzhiyun {
152*4882a593Smuzhiyun 	u32 mcmtr;
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	mcmtr = I10NM_GET_MCMTR(imc, chan);
155*4882a593Smuzhiyun 	edac_dbg(1, "ch%d mcmtr reg %x\n", chan, mcmtr);
156*4882a593Smuzhiyun 
157*4882a593Smuzhiyun 	return !!GET_BITFIELD(mcmtr, 2, 2);
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun 
/*
 * EDAC callback: populate @mci's dimm_info entries by probing every
 * channel and DIMM slot of this memory controller.  Regular DDR DIMMs
 * are detected via DIMMMTR, DDR-T (NVDIMM) presence via MCDDRTCFG;
 * the per-DIMM decode is delegated to the skx_common helpers.
 *
 * Returns 0 on success, or -ENODEV if any populated channel has ECC
 * disabled (errors could not be reported on such a channel).
 */
static int i10nm_get_dimm_config(struct mem_ctl_info *mci)
{
	struct skx_pvt *pvt = mci->pvt_info;
	struct skx_imc *imc = pvt->imc;
	struct dimm_info *dimm;
	u32 mtr, mcddrtcfg;
	int i, j, ndimms;

	for (i = 0; i < I10NM_NUM_CHANNELS; i++) {
		/* Skip controllers whose MMIO window was never mapped */
		if (!imc->mbase)
			continue;

		ndimms = 0;
		mcddrtcfg = I10NM_GET_MCDDRTCFG(imc, i);
		for (j = 0; j < I10NM_NUM_DIMMS; j++) {
			dimm = edac_get_dimm(mci, i, j, 0);
			mtr = I10NM_GET_DIMMMTR(imc, i, j);
			edac_dbg(1, "dimmmtr 0x%x mcddrtcfg 0x%x (mc%d ch%d dimm%d)\n",
				 mtr, mcddrtcfg, imc->mc, i, j);

			if (IS_DIMM_PRESENT(mtr))
				ndimms += skx_get_dimm_info(mtr, 0, 0, dimm,
							    imc, i, j);
			else if (IS_NVDIMM_PRESENT(mcddrtcfg, j))
				ndimms += skx_get_nvdimm_info(dimm, imc, i, j,
							      EDAC_MOD_STR);
		}
		/* A channel with DIMMs but no ECC cannot be monitored */
		if (ndimms && !i10nm_check_ecc(imc, i)) {
			i10nm_printk(KERN_ERR, "ECC is disabled on imc %d channel %d\n",
				     imc->mc, i);
			return -ENODEV;
		}
	}

	return 0;
}
196*4882a593Smuzhiyun 
/* Hook skx_common's decoder into the machine-check notification chain */
static struct notifier_block i10nm_mce_dec = {
	.notifier_call	= skx_mce_check_error,
	.priority	= MCE_PRIO_EDAC,
};
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun #ifdef CONFIG_EDAC_DEBUG
203*4882a593Smuzhiyun /*
204*4882a593Smuzhiyun  * Debug feature.
205*4882a593Smuzhiyun  * Exercise the address decode logic by writing an address to
206*4882a593Smuzhiyun  * /sys/kernel/debug/edac/i10nm_test/addr.
207*4882a593Smuzhiyun  */
/* debugfs directory handle; NULL if creation failed */
static struct dentry *i10nm_test;
209*4882a593Smuzhiyun 
debugfs_u64_set(void * data,u64 val)210*4882a593Smuzhiyun static int debugfs_u64_set(void *data, u64 val)
211*4882a593Smuzhiyun {
212*4882a593Smuzhiyun 	struct mce m;
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 	pr_warn_once("Fake error to 0x%llx injected via debugfs\n", val);
215*4882a593Smuzhiyun 
216*4882a593Smuzhiyun 	memset(&m, 0, sizeof(m));
217*4882a593Smuzhiyun 	/* ADDRV + MemRd + Unknown channel */
218*4882a593Smuzhiyun 	m.status = MCI_STATUS_ADDRV + 0x90;
219*4882a593Smuzhiyun 	/* One corrected error */
220*4882a593Smuzhiyun 	m.status |= BIT_ULL(MCI_STATUS_CEC_SHIFT);
221*4882a593Smuzhiyun 	m.addr = val;
222*4882a593Smuzhiyun 	skx_mce_check_error(NULL, 0, &m);
223*4882a593Smuzhiyun 
224*4882a593Smuzhiyun 	return 0;
225*4882a593Smuzhiyun }
226*4882a593Smuzhiyun DEFINE_SIMPLE_ATTRIBUTE(fops_u64_wo, NULL, debugfs_u64_set, "%llu\n");
227*4882a593Smuzhiyun 
setup_i10nm_debug(void)228*4882a593Smuzhiyun static void setup_i10nm_debug(void)
229*4882a593Smuzhiyun {
230*4882a593Smuzhiyun 	i10nm_test = edac_debugfs_create_dir("i10nm_test");
231*4882a593Smuzhiyun 	if (!i10nm_test)
232*4882a593Smuzhiyun 		return;
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun 	if (!edac_debugfs_create_file("addr", 0200, i10nm_test,
235*4882a593Smuzhiyun 				      NULL, &fops_u64_wo)) {
236*4882a593Smuzhiyun 		debugfs_remove(i10nm_test);
237*4882a593Smuzhiyun 		i10nm_test = NULL;
238*4882a593Smuzhiyun 	}
239*4882a593Smuzhiyun }
240*4882a593Smuzhiyun 
/* Remove the i10nm_test debugfs directory and everything under it */
static void teardown_i10nm_debug(void)
{
	debugfs_remove_recursive(i10nm_test);
}
#else
/* No-op stubs when EDAC debugfs support is not configured */
static inline void setup_i10nm_debug(void) {}
static inline void teardown_i10nm_debug(void) {}
#endif /*CONFIG_EDAC_DEBUG*/
249*4882a593Smuzhiyun 
/*
 * Module init: verify we are on a supported bare-metal CPU, enumerate
 * sockets and IMCs, register one EDAC memory controller per IMC, then
 * hook into the MCE decode chain.  On any failure after bus-mapping
 * discovery, skx_remove() releases everything acquired so far.
 */
static int __init i10nm_init(void)
{
	u8 mc = 0, src_id = 0, node_id = 0;
	const struct x86_cpu_id *id;
	struct res_config *cfg;
	const char *owner;
	struct skx_dev *d;
	int rc, i, off[3] = {0xd0, 0xc8, 0xcc};
	u64 tolm, tohm;

	edac_dbg(2, "\n");

	/* Only one EDAC driver may own the platform at a time */
	owner = edac_get_owner();
	if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR)))
		return -EBUSY;

	/* The memory controller is not accessible from inside a guest */
	if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR))
		return -ENODEV;

	id = x86_match_cpu(i10nm_cpuids);
	if (!id)
		return -ENODEV;

	/* Stepping-specific register layout selected by the match table */
	cfg = (struct res_config *)id->driver_data;

	/* Read TOLM/TOHM from PCI device 0x09a2 at the offsets in off[] */
	rc = skx_get_hi_lo(0x09a2, off, &tolm, &tohm);
	if (rc)
		return rc;

	rc = skx_get_all_bus_mappings(cfg, &i10nm_edac_list);
	if (rc < 0)
		goto fail;
	/* rc == 0 means no sockets were discovered: nothing to clean up */
	if (rc == 0) {
		i10nm_printk(KERN_ERR, "No memory controllers found\n");
		return -ENODEV;
	}

	rc = i10nm_get_all_munits();
	if (rc < 0)
		goto fail;

	list_for_each_entry(d, i10nm_edac_list, list) {
		rc = skx_get_src_id(d, 0xf8, &src_id);
		if (rc < 0)
			goto fail;

		rc = skx_get_node_id(d, &node_id);
		if (rc < 0)
			goto fail;

		edac_dbg(2, "src_id = %d node_id = %d\n", src_id, node_id);
		for (i = 0; i < I10NM_NUM_IMC; i++) {
			/* Skip IMC slots that were not found during discovery */
			if (!d->imc[i].mdev)
				continue;

			/* Global MC numbers are assigned in discovery order */
			d->imc[i].mc  = mc++;
			d->imc[i].lmc = i;
			d->imc[i].src_id  = src_id;
			d->imc[i].node_id = node_id;

			rc = skx_register_mci(&d->imc[i], d->imc[i].mdev,
					      "Intel_10nm Socket", EDAC_MOD_STR,
					      i10nm_get_dimm_config);
			if (rc < 0)
				goto fail;
		}
	}

	/* ADXL translation is used to decode MCE addresses down to DIMMs */
	rc = skx_adxl_get();
	if (rc)
		goto fail;

	opstate_init();
	mce_register_decode_chain(&i10nm_mce_dec);
	setup_i10nm_debug();

	i10nm_printk(KERN_INFO, "%s\n", I10NM_REVISION);

	return 0;
fail:
	skx_remove();
	return rc;
}
333*4882a593Smuzhiyun 
/* Module unload: tear down in reverse order of i10nm_init() */
static void __exit i10nm_exit(void)
{
	edac_dbg(2, "\n");
	teardown_i10nm_debug();
	mce_unregister_decode_chain(&i10nm_mce_dec);
	skx_adxl_put();
	skx_remove();
}
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun module_init(i10nm_init);
344*4882a593Smuzhiyun module_exit(i10nm_exit);
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
347*4882a593Smuzhiyun MODULE_DESCRIPTION("MC Driver for Intel 10nm server processors");
348