1*4882a593Smuzhiyun /******************************************************************************
2*4882a593Smuzhiyun * mcelog.c
3*4882a593Smuzhiyun * Driver for receiving and transferring machine check error information
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (c) 2012 Intel Corporation
6*4882a593Smuzhiyun * Author: Liu, Jinsong <jinsong.liu@intel.com>
7*4882a593Smuzhiyun * Author: Jiang, Yunhong <yunhong.jiang@intel.com>
8*4882a593Smuzhiyun * Author: Ke, Liping <liping.ke@intel.com>
9*4882a593Smuzhiyun *
10*4882a593Smuzhiyun * This program is free software; you can redistribute it and/or
11*4882a593Smuzhiyun * modify it under the terms of the GNU General Public License version 2
12*4882a593Smuzhiyun * as published by the Free Software Foundation; or, when distributed
13*4882a593Smuzhiyun * separately from the Linux kernel or incorporated into other
14*4882a593Smuzhiyun * software packages, subject to the following license:
15*4882a593Smuzhiyun *
16*4882a593Smuzhiyun * Permission is hereby granted, free of charge, to any person obtaining a copy
17*4882a593Smuzhiyun * of this source file (the "Software"), to deal in the Software without
18*4882a593Smuzhiyun * restriction, including without limitation the rights to use, copy, modify,
19*4882a593Smuzhiyun * merge, publish, distribute, sublicense, and/or sell copies of the Software,
20*4882a593Smuzhiyun * and to permit persons to whom the Software is furnished to do so, subject to
21*4882a593Smuzhiyun * the following conditions:
22*4882a593Smuzhiyun *
23*4882a593Smuzhiyun * The above copyright notice and this permission notice shall be included in
24*4882a593Smuzhiyun * all copies or substantial portions of the Software.
25*4882a593Smuzhiyun *
26*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
27*4882a593Smuzhiyun * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
28*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
29*4882a593Smuzhiyun * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
30*4882a593Smuzhiyun * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
31*4882a593Smuzhiyun * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
32*4882a593Smuzhiyun * IN THE SOFTWARE.
33*4882a593Smuzhiyun */
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun #define pr_fmt(fmt) "xen_mcelog: " fmt
36*4882a593Smuzhiyun
37*4882a593Smuzhiyun #include <linux/init.h>
38*4882a593Smuzhiyun #include <linux/types.h>
39*4882a593Smuzhiyun #include <linux/kernel.h>
40*4882a593Smuzhiyun #include <linux/slab.h>
41*4882a593Smuzhiyun #include <linux/fs.h>
42*4882a593Smuzhiyun #include <linux/device.h>
43*4882a593Smuzhiyun #include <linux/miscdevice.h>
44*4882a593Smuzhiyun #include <linux/uaccess.h>
45*4882a593Smuzhiyun #include <linux/capability.h>
46*4882a593Smuzhiyun #include <linux/poll.h>
47*4882a593Smuzhiyun #include <linux/sched.h>
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun #include <xen/interface/xen.h>
50*4882a593Smuzhiyun #include <xen/events.h>
51*4882a593Smuzhiyun #include <xen/interface/vcpu.h>
52*4882a593Smuzhiyun #include <xen/xen.h>
53*4882a593Smuzhiyun #include <asm/xen/hypercall.h>
54*4882a593Smuzhiyun #include <asm/xen/hypervisor.h>
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun static struct mc_info g_mi;
57*4882a593Smuzhiyun static struct mcinfo_logical_cpu *g_physinfo;
58*4882a593Smuzhiyun static uint32_t ncpus;
59*4882a593Smuzhiyun
60*4882a593Smuzhiyun static DEFINE_MUTEX(mcelog_lock);
61*4882a593Smuzhiyun
62*4882a593Smuzhiyun static struct xen_mce_log xen_mcelog = {
63*4882a593Smuzhiyun .signature = XEN_MCE_LOG_SIGNATURE,
64*4882a593Smuzhiyun .len = XEN_MCE_LOG_LEN,
65*4882a593Smuzhiyun .recordlen = sizeof(struct xen_mce),
66*4882a593Smuzhiyun };
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun static DEFINE_SPINLOCK(xen_mce_chrdev_state_lock);
69*4882a593Smuzhiyun static int xen_mce_chrdev_open_count; /* #times opened */
70*4882a593Smuzhiyun static int xen_mce_chrdev_open_exclu; /* already open exclusive? */
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun static DECLARE_WAIT_QUEUE_HEAD(xen_mce_chrdev_wait);
73*4882a593Smuzhiyun
xen_mce_chrdev_open(struct inode * inode,struct file * file)74*4882a593Smuzhiyun static int xen_mce_chrdev_open(struct inode *inode, struct file *file)
75*4882a593Smuzhiyun {
76*4882a593Smuzhiyun spin_lock(&xen_mce_chrdev_state_lock);
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun if (xen_mce_chrdev_open_exclu ||
79*4882a593Smuzhiyun (xen_mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
80*4882a593Smuzhiyun spin_unlock(&xen_mce_chrdev_state_lock);
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun return -EBUSY;
83*4882a593Smuzhiyun }
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun if (file->f_flags & O_EXCL)
86*4882a593Smuzhiyun xen_mce_chrdev_open_exclu = 1;
87*4882a593Smuzhiyun xen_mce_chrdev_open_count++;
88*4882a593Smuzhiyun
89*4882a593Smuzhiyun spin_unlock(&xen_mce_chrdev_state_lock);
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun return nonseekable_open(inode, file);
92*4882a593Smuzhiyun }
93*4882a593Smuzhiyun
xen_mce_chrdev_release(struct inode * inode,struct file * file)94*4882a593Smuzhiyun static int xen_mce_chrdev_release(struct inode *inode, struct file *file)
95*4882a593Smuzhiyun {
96*4882a593Smuzhiyun spin_lock(&xen_mce_chrdev_state_lock);
97*4882a593Smuzhiyun
98*4882a593Smuzhiyun xen_mce_chrdev_open_count--;
99*4882a593Smuzhiyun xen_mce_chrdev_open_exclu = 0;
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun spin_unlock(&xen_mce_chrdev_state_lock);
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun return 0;
104*4882a593Smuzhiyun }
105*4882a593Smuzhiyun
xen_mce_chrdev_read(struct file * filp,char __user * ubuf,size_t usize,loff_t * off)106*4882a593Smuzhiyun static ssize_t xen_mce_chrdev_read(struct file *filp, char __user *ubuf,
107*4882a593Smuzhiyun size_t usize, loff_t *off)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun char __user *buf = ubuf;
110*4882a593Smuzhiyun unsigned num;
111*4882a593Smuzhiyun int i, err;
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun mutex_lock(&mcelog_lock);
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun num = xen_mcelog.next;
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun /* Only supports full reads right now */
118*4882a593Smuzhiyun err = -EINVAL;
119*4882a593Smuzhiyun if (*off != 0 || usize < XEN_MCE_LOG_LEN*sizeof(struct xen_mce))
120*4882a593Smuzhiyun goto out;
121*4882a593Smuzhiyun
122*4882a593Smuzhiyun err = 0;
123*4882a593Smuzhiyun for (i = 0; i < num; i++) {
124*4882a593Smuzhiyun struct xen_mce *m = &xen_mcelog.entry[i];
125*4882a593Smuzhiyun
126*4882a593Smuzhiyun err |= copy_to_user(buf, m, sizeof(*m));
127*4882a593Smuzhiyun buf += sizeof(*m);
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun memset(xen_mcelog.entry, 0, num * sizeof(struct xen_mce));
131*4882a593Smuzhiyun xen_mcelog.next = 0;
132*4882a593Smuzhiyun
133*4882a593Smuzhiyun if (err)
134*4882a593Smuzhiyun err = -EFAULT;
135*4882a593Smuzhiyun
136*4882a593Smuzhiyun out:
137*4882a593Smuzhiyun mutex_unlock(&mcelog_lock);
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun return err ? err : buf - ubuf;
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun
xen_mce_chrdev_poll(struct file * file,poll_table * wait)142*4882a593Smuzhiyun static __poll_t xen_mce_chrdev_poll(struct file *file, poll_table *wait)
143*4882a593Smuzhiyun {
144*4882a593Smuzhiyun poll_wait(file, &xen_mce_chrdev_wait, wait);
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun if (xen_mcelog.next)
147*4882a593Smuzhiyun return EPOLLIN | EPOLLRDNORM;
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun return 0;
150*4882a593Smuzhiyun }
151*4882a593Smuzhiyun
xen_mce_chrdev_ioctl(struct file * f,unsigned int cmd,unsigned long arg)152*4882a593Smuzhiyun static long xen_mce_chrdev_ioctl(struct file *f, unsigned int cmd,
153*4882a593Smuzhiyun unsigned long arg)
154*4882a593Smuzhiyun {
155*4882a593Smuzhiyun int __user *p = (int __user *)arg;
156*4882a593Smuzhiyun
157*4882a593Smuzhiyun if (!capable(CAP_SYS_ADMIN))
158*4882a593Smuzhiyun return -EPERM;
159*4882a593Smuzhiyun
160*4882a593Smuzhiyun switch (cmd) {
161*4882a593Smuzhiyun case MCE_GET_RECORD_LEN:
162*4882a593Smuzhiyun return put_user(sizeof(struct xen_mce), p);
163*4882a593Smuzhiyun case MCE_GET_LOG_LEN:
164*4882a593Smuzhiyun return put_user(XEN_MCE_LOG_LEN, p);
165*4882a593Smuzhiyun case MCE_GETCLEAR_FLAGS: {
166*4882a593Smuzhiyun unsigned flags;
167*4882a593Smuzhiyun
168*4882a593Smuzhiyun do {
169*4882a593Smuzhiyun flags = xen_mcelog.flags;
170*4882a593Smuzhiyun } while (cmpxchg(&xen_mcelog.flags, flags, 0) != flags);
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun return put_user(flags, p);
173*4882a593Smuzhiyun }
174*4882a593Smuzhiyun default:
175*4882a593Smuzhiyun return -ENOTTY;
176*4882a593Smuzhiyun }
177*4882a593Smuzhiyun }
178*4882a593Smuzhiyun
/* File operations backing the /dev/mcelog character device. */
static const struct file_operations xen_mce_chrdev_ops = {
	.open = xen_mce_chrdev_open,
	.release = xen_mce_chrdev_release,
	.read = xen_mce_chrdev_read,
	.poll = xen_mce_chrdev_poll,
	.unlocked_ioctl = xen_mce_chrdev_ioctl,
	.llseek = no_llseek,
};
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun static struct miscdevice xen_mce_chrdev_device = {
189*4882a593Smuzhiyun MISC_MCELOG_MINOR,
190*4882a593Smuzhiyun "mcelog",
191*4882a593Smuzhiyun &xen_mce_chrdev_ops,
192*4882a593Smuzhiyun };
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun /*
195*4882a593Smuzhiyun * Caller should hold the mcelog_lock
196*4882a593Smuzhiyun */
xen_mce_log(struct xen_mce * mce)197*4882a593Smuzhiyun static void xen_mce_log(struct xen_mce *mce)
198*4882a593Smuzhiyun {
199*4882a593Smuzhiyun unsigned entry;
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun entry = xen_mcelog.next;
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun /*
204*4882a593Smuzhiyun * When the buffer fills up discard new entries.
205*4882a593Smuzhiyun * Assume that the earlier errors are the more
206*4882a593Smuzhiyun * interesting ones:
207*4882a593Smuzhiyun */
208*4882a593Smuzhiyun if (entry >= XEN_MCE_LOG_LEN) {
209*4882a593Smuzhiyun set_bit(XEN_MCE_OVERFLOW,
210*4882a593Smuzhiyun (unsigned long *)&xen_mcelog.flags);
211*4882a593Smuzhiyun return;
212*4882a593Smuzhiyun }
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun memcpy(xen_mcelog.entry + entry, mce, sizeof(struct xen_mce));
215*4882a593Smuzhiyun
216*4882a593Smuzhiyun xen_mcelog.next++;
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun
/*
 * Translate one Xen mc_info telemetry blob into xen_mce records and
 * append them to the local log via xen_mce_log() (caller holds
 * mcelog_lock via the work handler).
 *
 * Returns 0 on success, or -ENODEV when the global record is missing,
 * the reported apicid matches no cached physical CPU, or no bank
 * record is present.
 */
static int convert_log(struct mc_info *mi)
{
	struct mcinfo_common *mic;
	struct mcinfo_global *mc_global;
	struct mcinfo_bank *mc_bank;
	struct xen_mce m;
	unsigned int i, j;

	/* The global record carries mcgstatus and the faulting apicid. */
	mic = NULL;
	x86_mcinfo_lookup(&mic, mi, MC_TYPE_GLOBAL);
	if (unlikely(!mic)) {
		pr_warn("Failed to find global error info\n");
		return -ENODEV;
	}

	memset(&m, 0, sizeof(struct xen_mce));

	mc_global = (struct mcinfo_global *)mic;
	m.mcgstatus = mc_global->mc_gstatus;
	m.apicid = mc_global->mc_apicid;

	/* Map the apicid onto the physical CPU table cached at init. */
	for (i = 0; i < ncpus; i++)
		if (g_physinfo[i].mc_apicid == m.apicid)
			break;
	if (unlikely(i == ncpus)) {
		pr_warn("Failed to match cpu with apicid %d\n", m.apicid);
		return -ENODEV;
	}

	m.socketid = g_physinfo[i].mc_chipid;
	m.cpu = m.extcpu = g_physinfo[i].mc_cpunr;
	m.cpuvendor = (__u8)g_physinfo[i].mc_vendor;
	/* Pick up the MCG_CAP and PPIN MSR values saved for this CPU. */
	for (j = 0; j < g_physinfo[i].mc_nmsrvals; ++j)
		switch (g_physinfo[i].mc_msrvalues[j].reg) {
		case MSR_IA32_MCG_CAP:
			m.mcgcap = g_physinfo[i].mc_msrvalues[j].value;
			break;

		case MSR_PPIN:
		case MSR_AMD_PPIN:
			m.ppin = g_physinfo[i].mc_msrvalues[j].value;
			break;
		}

	mic = NULL;
	x86_mcinfo_lookup(&mic, mi, MC_TYPE_BANK);
	if (unlikely(!mic)) {
		pr_warn("Fail to find bank error info\n");
		return -ENODEV;
	}

	/*
	 * Walk the remaining records; each bank record becomes one
	 * xen_mce entry sharing the global/cpu fields filled in above.
	 * Stop at the first record with an unknown type or zero size
	 * (end of the blob).
	 */
	do {
		if ((!mic) || (mic->size == 0) ||
		    (mic->type != MC_TYPE_GLOBAL &&
		     mic->type != MC_TYPE_BANK &&
		     mic->type != MC_TYPE_EXTENDED &&
		     mic->type != MC_TYPE_RECOVERY))
			break;

		if (mic->type == MC_TYPE_BANK) {
			mc_bank = (struct mcinfo_bank *)mic;
			m.misc = mc_bank->mc_misc;
			m.status = mc_bank->mc_status;
			m.addr = mc_bank->mc_addr;
			m.tsc = mc_bank->mc_tsc;
			m.bank = mc_bank->mc_bank;
			m.finished = 1;
			/* log this record */
			xen_mce_log(&m);
		}
		mic = x86_mcinfo_next(mic);
	} while (1);

	return 0;
}
294*4882a593Smuzhiyun
/*
 * Drain one of Xen's mc_info queues, selected by @flags (XEN_MC_URGENT
 * or XEN_MC_NONURGENT).  Each fetched blob is converted into local log
 * entries and then acked back to Xen; a conversion failure is only
 * warned about so the queue still drains.  Caller should hold the
 * mcelog_lock.  Returns 0 once the queue is empty, or the error from
 * the failing hypercall.
 */
static int mc_queue_handle(uint32_t flags)
{
	struct xen_mc mc_op;
	int ret = 0;

	mc_op.cmd = XEN_MC_fetch;
	set_xen_guest_handle(mc_op.u.mc_fetch.data, &g_mi);
	do {
		/*
		 * Re-set the request flags every pass: the hypercall
		 * writes status bits (NODATA/FETCHFAILED) back into
		 * this field, and the ack below overwrites it too.
		 */
		mc_op.u.mc_fetch.flags = flags;
		ret = HYPERVISOR_mca(&mc_op);
		if (ret) {
			pr_err("Failed to fetch %surgent error log\n",
			       flags == XEN_MC_URGENT ? "" : "non");
			break;
		}

		/* Queue empty, or Xen could not produce the record. */
		if (mc_op.u.mc_fetch.flags & XEN_MC_NODATA ||
		    mc_op.u.mc_fetch.flags & XEN_MC_FETCHFAILED)
			break;
		else {
			ret = convert_log(&g_mi);
			if (ret)
				pr_warn("Failed to convert this error log, continue acking it anyway\n");

			/* Ack so Xen can release this record and advance. */
			mc_op.u.mc_fetch.flags = flags | XEN_MC_ACK;
			ret = HYPERVISOR_mca(&mc_op);
			if (ret) {
				pr_err("Failed to ack previous error log\n");
				break;
			}
		}
	} while (1);

	return ret;
}
330*4882a593Smuzhiyun
331*4882a593Smuzhiyun /* virq handler for machine check error info*/
xen_mce_work_fn(struct work_struct * work)332*4882a593Smuzhiyun static void xen_mce_work_fn(struct work_struct *work)
333*4882a593Smuzhiyun {
334*4882a593Smuzhiyun int err;
335*4882a593Smuzhiyun
336*4882a593Smuzhiyun mutex_lock(&mcelog_lock);
337*4882a593Smuzhiyun
338*4882a593Smuzhiyun /* urgent mc_info */
339*4882a593Smuzhiyun err = mc_queue_handle(XEN_MC_URGENT);
340*4882a593Smuzhiyun if (err)
341*4882a593Smuzhiyun pr_err("Failed to handle urgent mc_info queue, continue handling nonurgent mc_info queue anyway\n");
342*4882a593Smuzhiyun
343*4882a593Smuzhiyun /* nonurgent mc_info */
344*4882a593Smuzhiyun err = mc_queue_handle(XEN_MC_NONURGENT);
345*4882a593Smuzhiyun if (err)
346*4882a593Smuzhiyun pr_err("Failed to handle nonurgent mc_info queue\n");
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun /* wake processes polling /dev/mcelog */
349*4882a593Smuzhiyun wake_up_interruptible(&xen_mce_chrdev_wait);
350*4882a593Smuzhiyun
351*4882a593Smuzhiyun mutex_unlock(&mcelog_lock);
352*4882a593Smuzhiyun }
353*4882a593Smuzhiyun static DECLARE_WORK(xen_mce_work, xen_mce_work_fn);
354*4882a593Smuzhiyun
/*
 * MCA virq handler.  The actual fetching takes hypercalls and a mutex
 * (see xen_mce_work_fn), so just punt to the work item here.
 */
static irqreturn_t xen_mce_interrupt(int irq, void *dev_id)
{
	schedule_work(&xen_mce_work);
	return IRQ_HANDLED;
}
360*4882a593Smuzhiyun
bind_virq_for_mce(void)361*4882a593Smuzhiyun static int bind_virq_for_mce(void)
362*4882a593Smuzhiyun {
363*4882a593Smuzhiyun int ret;
364*4882a593Smuzhiyun struct xen_mc mc_op;
365*4882a593Smuzhiyun
366*4882a593Smuzhiyun memset(&mc_op, 0, sizeof(struct xen_mc));
367*4882a593Smuzhiyun
368*4882a593Smuzhiyun /* Fetch physical CPU Numbers */
369*4882a593Smuzhiyun mc_op.cmd = XEN_MC_physcpuinfo;
370*4882a593Smuzhiyun set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
371*4882a593Smuzhiyun ret = HYPERVISOR_mca(&mc_op);
372*4882a593Smuzhiyun if (ret) {
373*4882a593Smuzhiyun pr_err("Failed to get CPU numbers\n");
374*4882a593Smuzhiyun return ret;
375*4882a593Smuzhiyun }
376*4882a593Smuzhiyun
377*4882a593Smuzhiyun /* Fetch each CPU Physical Info for later reference*/
378*4882a593Smuzhiyun ncpus = mc_op.u.mc_physcpuinfo.ncpus;
379*4882a593Smuzhiyun g_physinfo = kcalloc(ncpus, sizeof(struct mcinfo_logical_cpu),
380*4882a593Smuzhiyun GFP_KERNEL);
381*4882a593Smuzhiyun if (!g_physinfo)
382*4882a593Smuzhiyun return -ENOMEM;
383*4882a593Smuzhiyun set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
384*4882a593Smuzhiyun ret = HYPERVISOR_mca(&mc_op);
385*4882a593Smuzhiyun if (ret) {
386*4882a593Smuzhiyun pr_err("Failed to get CPU info\n");
387*4882a593Smuzhiyun kfree(g_physinfo);
388*4882a593Smuzhiyun return ret;
389*4882a593Smuzhiyun }
390*4882a593Smuzhiyun
391*4882a593Smuzhiyun ret = bind_virq_to_irqhandler(VIRQ_MCA, 0,
392*4882a593Smuzhiyun xen_mce_interrupt, 0, "mce", NULL);
393*4882a593Smuzhiyun if (ret < 0) {
394*4882a593Smuzhiyun pr_err("Failed to bind virq\n");
395*4882a593Smuzhiyun kfree(g_physinfo);
396*4882a593Smuzhiyun return ret;
397*4882a593Smuzhiyun }
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun return 0;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun
xen_late_init_mcelog(void)402*4882a593Smuzhiyun static int __init xen_late_init_mcelog(void)
403*4882a593Smuzhiyun {
404*4882a593Smuzhiyun int ret;
405*4882a593Smuzhiyun
406*4882a593Smuzhiyun /* Only DOM0 is responsible for MCE logging */
407*4882a593Smuzhiyun if (!xen_initial_domain())
408*4882a593Smuzhiyun return -ENODEV;
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun /* register character device /dev/mcelog for xen mcelog */
411*4882a593Smuzhiyun ret = misc_register(&xen_mce_chrdev_device);
412*4882a593Smuzhiyun if (ret)
413*4882a593Smuzhiyun return ret;
414*4882a593Smuzhiyun
415*4882a593Smuzhiyun ret = bind_virq_for_mce();
416*4882a593Smuzhiyun if (ret)
417*4882a593Smuzhiyun goto deregister;
418*4882a593Smuzhiyun
419*4882a593Smuzhiyun pr_info("/dev/mcelog registered by Xen\n");
420*4882a593Smuzhiyun
421*4882a593Smuzhiyun return 0;
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun deregister:
424*4882a593Smuzhiyun misc_deregister(&xen_mce_chrdev_device);
425*4882a593Smuzhiyun return ret;
426*4882a593Smuzhiyun }
427*4882a593Smuzhiyun device_initcall(xen_late_init_mcelog);
428