// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/capability.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/xenbus.h>
#include <xen/xenbus_dev.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <asm/xen/hypervisor.h>

#include "xenbus.h"

/* Only privileged (CAP_SYS_ADMIN) processes may open the device. */
static int xenbus_backend_open(struct inode *inode, struct file *filp)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return nonseekable_open(inode, filp);
}

/*
 * Grant @domid access to the shared xenstore ring page and allocate an
 * unbound event channel for it, returning the local port number.
 */
static long xenbus_alloc(domid_t domid)
{
	struct evtchn_alloc_unbound arg;
	int err = -EEXIST;

	xs_suspend();

	/* If xenstored_ready is nonzero, that means we have already talked to
	 * xenstore and set up watches. These watches will be restored by
	 * xs_resume, but that requires communication over the port established
	 * below that is not visible to anyone until the ioctl returns.
	 *
	 * This can be resolved by splitting the ioctl into two parts
	 * (postponing the resume until xenstored is active) but this is
	 * unnecessarily complex for the intended use where xenstored is only
	 * started once - so return -EEXIST if it's already running.
	 */
	if (xenstored_ready)
		goto out_err;

	gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
					virt_to_gfn(xen_store_interface),
					0 /* writable */);

	arg.dom = DOMID_SELF;
	arg.remote_dom = domid;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &arg);
	if (err)
		goto out_err;

	if (xen_store_evtchn > 0)
		xb_deinit_comms();

	xen_store_evtchn = arg.port;

	xs_resume();

	return arg.port;

out_err:
	xs_suspend_cancel();
	return err;
}

static long xenbus_backend_ioctl(struct file *file, unsigned int cmd,
				 unsigned long data)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IOCTL_XENBUS_BACKEND_EVTCHN:
		if (xen_store_evtchn > 0)
			return xen_store_evtchn;
		return -ENODEV;
	case IOCTL_XENBUS_BACKEND_SETUP:
		return xenbus_alloc(data);
	default:
		return -ENOTTY;
	}
}

/* Map the single shared xenstore ring page into the calling process. */
static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
		return -EINVAL;

	if (remap_pfn_range(vma, vma->vm_start,
			    virt_to_pfn(xen_store_interface),
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static const struct file_operations xenbus_backend_fops = {
	.open = xenbus_backend_open,
	.mmap = xenbus_backend_mmap,
	.unlocked_ioctl = xenbus_backend_ioctl,
};

static struct miscdevice xenbus_backend_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/xenbus_backend",
	.fops = &xenbus_backend_fops,
};

static int __init xenbus_backend_init(void)
{
	int err;

	if (!xen_initial_domain())
		return -ENODEV;

	err = misc_register(&xenbus_backend_dev);
	if (err)
		pr_err("Could not register xenbus backend device\n");
	return err;
}
device_initcall(xenbus_backend_init);
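
/*
 * Illustrative sketch (not part of the driver): how a privileged tool in
 * the initial domain might be expected to drive this device. The device
 * path follows from the miscdevice name above; "domid" stands for the id
 * of the domain that will host the xenstore service and is hypothetical
 * here, error handling is omitted, and this is an assumption about typical
 * usage rather than a reference implementation.
 *
 *	int fd = open("/dev/xen/xenbus_backend", O_RDWR);
 *
 *	// Map the shared xenstore ring page (offset 0, at most one page).
 *	void *ring = mmap(NULL, sysconf(_SC_PAGESIZE),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *
 *	// Grant the target domain access to the ring and allocate an
 *	// event channel to it; returns the local port number.
 *	int port = ioctl(fd, IOCTL_XENBUS_BACKEND_SETUP, domid);
 *
 *	// Alternatively, query the port of an already established channel.
 *	int existing = ioctl(fd, IOCTL_XENBUS_BACKEND_EVTCHN);
 */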