1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * vboxguest linux pci driver, char-dev and input-device code,
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2006-2016 Oracle Corporation
6*4882a593Smuzhiyun */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/cred.h>
9*4882a593Smuzhiyun #include <linux/input.h>
10*4882a593Smuzhiyun #include <linux/kernel.h>
11*4882a593Smuzhiyun #include <linux/miscdevice.h>
12*4882a593Smuzhiyun #include <linux/module.h>
13*4882a593Smuzhiyun #include <linux/pci.h>
14*4882a593Smuzhiyun #include <linux/poll.h>
15*4882a593Smuzhiyun #include <linux/vbox_utils.h>
16*4882a593Smuzhiyun #include "vboxguest_core.h"
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun /** The device name. */
19*4882a593Smuzhiyun #define DEVICE_NAME "vboxguest"
20*4882a593Smuzhiyun /** The device name for the device node open to everyone. */
21*4882a593Smuzhiyun #define DEVICE_NAME_USER "vboxuser"
22*4882a593Smuzhiyun /** VirtualBox PCI vendor ID. */
23*4882a593Smuzhiyun #define VBOX_VENDORID 0x80ee
24*4882a593Smuzhiyun /** VMMDev PCI card product ID. */
25*4882a593Smuzhiyun #define VMMDEV_DEVICEID 0xcafe
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun /** Mutex protecting the global vbg_gdev pointer used by vbg_get/put_gdev. */
28*4882a593Smuzhiyun static DEFINE_MUTEX(vbg_gdev_mutex);
29*4882a593Smuzhiyun /** Global vbg_gdev pointer used by vbg_get/put_gdev. */
30*4882a593Smuzhiyun static struct vbg_dev *vbg_gdev;
31*4882a593Smuzhiyun
vbg_misc_device_requestor(struct inode * inode)32*4882a593Smuzhiyun static u32 vbg_misc_device_requestor(struct inode *inode)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun u32 requestor = VMMDEV_REQUESTOR_USERMODE |
35*4882a593Smuzhiyun VMMDEV_REQUESTOR_CON_DONT_KNOW |
36*4882a593Smuzhiyun VMMDEV_REQUESTOR_TRUST_NOT_GIVEN;
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun if (from_kuid(current_user_ns(), current_uid()) == 0)
39*4882a593Smuzhiyun requestor |= VMMDEV_REQUESTOR_USR_ROOT;
40*4882a593Smuzhiyun else
41*4882a593Smuzhiyun requestor |= VMMDEV_REQUESTOR_USR_USER;
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun if (in_egroup_p(inode->i_gid))
44*4882a593Smuzhiyun requestor |= VMMDEV_REQUESTOR_GRP_VBOX;
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun return requestor;
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun
vbg_misc_device_open(struct inode * inode,struct file * filp)49*4882a593Smuzhiyun static int vbg_misc_device_open(struct inode *inode, struct file *filp)
50*4882a593Smuzhiyun {
51*4882a593Smuzhiyun struct vbg_session *session;
52*4882a593Smuzhiyun struct vbg_dev *gdev;
53*4882a593Smuzhiyun
54*4882a593Smuzhiyun /* misc_open sets filp->private_data to our misc device */
55*4882a593Smuzhiyun gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
56*4882a593Smuzhiyun
57*4882a593Smuzhiyun session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode));
58*4882a593Smuzhiyun if (IS_ERR(session))
59*4882a593Smuzhiyun return PTR_ERR(session);
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun filp->private_data = session;
62*4882a593Smuzhiyun return 0;
63*4882a593Smuzhiyun }
64*4882a593Smuzhiyun
vbg_misc_device_user_open(struct inode * inode,struct file * filp)65*4882a593Smuzhiyun static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
66*4882a593Smuzhiyun {
67*4882a593Smuzhiyun struct vbg_session *session;
68*4882a593Smuzhiyun struct vbg_dev *gdev;
69*4882a593Smuzhiyun
70*4882a593Smuzhiyun /* misc_open sets filp->private_data to our misc device */
71*4882a593Smuzhiyun gdev = container_of(filp->private_data, struct vbg_dev,
72*4882a593Smuzhiyun misc_device_user);
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun session = vbg_core_open_session(gdev, vbg_misc_device_requestor(inode) |
75*4882a593Smuzhiyun VMMDEV_REQUESTOR_USER_DEVICE);
76*4882a593Smuzhiyun if (IS_ERR(session))
77*4882a593Smuzhiyun return PTR_ERR(session);
78*4882a593Smuzhiyun
79*4882a593Smuzhiyun filp->private_data = session;
80*4882a593Smuzhiyun return 0;
81*4882a593Smuzhiyun }
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun /**
84*4882a593Smuzhiyun * Close device.
85*4882a593Smuzhiyun * Return: 0 on success, negated errno on failure.
86*4882a593Smuzhiyun * @inode: Pointer to inode info structure.
87*4882a593Smuzhiyun * @filp: Associated file pointer.
88*4882a593Smuzhiyun */
vbg_misc_device_close(struct inode * inode,struct file * filp)89*4882a593Smuzhiyun static int vbg_misc_device_close(struct inode *inode, struct file *filp)
90*4882a593Smuzhiyun {
91*4882a593Smuzhiyun vbg_core_close_session(filp->private_data);
92*4882a593Smuzhiyun filp->private_data = NULL;
93*4882a593Smuzhiyun return 0;
94*4882a593Smuzhiyun }
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun /**
97*4882a593Smuzhiyun * Device I/O Control entry point.
98*4882a593Smuzhiyun * Return: 0 on success, negated errno on failure.
99*4882a593Smuzhiyun * @filp: Associated file pointer.
100*4882a593Smuzhiyun * @req: The request specified to ioctl().
101*4882a593Smuzhiyun * @arg: The argument specified to ioctl().
102*4882a593Smuzhiyun */
vbg_misc_device_ioctl(struct file * filp,unsigned int req,unsigned long arg)103*4882a593Smuzhiyun static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
104*4882a593Smuzhiyun unsigned long arg)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun struct vbg_session *session = filp->private_data;
107*4882a593Smuzhiyun size_t returned_size, size;
108*4882a593Smuzhiyun struct vbg_ioctl_hdr hdr;
109*4882a593Smuzhiyun bool is_vmmdev_req;
110*4882a593Smuzhiyun int ret = 0;
111*4882a593Smuzhiyun void *buf;
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun if (copy_from_user(&hdr, (void *)arg, sizeof(hdr)))
114*4882a593Smuzhiyun return -EFAULT;
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun if (hdr.version != VBG_IOCTL_HDR_VERSION)
117*4882a593Smuzhiyun return -EINVAL;
118*4882a593Smuzhiyun
119*4882a593Smuzhiyun if (hdr.size_in < sizeof(hdr) ||
120*4882a593Smuzhiyun (hdr.size_out && hdr.size_out < sizeof(hdr)))
121*4882a593Smuzhiyun return -EINVAL;
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun size = max(hdr.size_in, hdr.size_out);
124*4882a593Smuzhiyun if (_IOC_SIZE(req) && _IOC_SIZE(req) != size)
125*4882a593Smuzhiyun return -EINVAL;
126*4882a593Smuzhiyun if (size > SZ_16M)
127*4882a593Smuzhiyun return -E2BIG;
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun /*
130*4882a593Smuzhiyun * IOCTL_VMMDEV_REQUEST needs the buffer to be below 4G to avoid
131*4882a593Smuzhiyun * the need for a bounce-buffer and another copy later on.
132*4882a593Smuzhiyun */
133*4882a593Smuzhiyun is_vmmdev_req = (req & ~IOCSIZE_MASK) == VBG_IOCTL_VMMDEV_REQUEST(0) ||
134*4882a593Smuzhiyun req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
135*4882a593Smuzhiyun req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT;
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun if (is_vmmdev_req)
138*4882a593Smuzhiyun buf = vbg_req_alloc(size, VBG_IOCTL_HDR_TYPE_DEFAULT,
139*4882a593Smuzhiyun session->requestor);
140*4882a593Smuzhiyun else
141*4882a593Smuzhiyun buf = kmalloc(size, GFP_KERNEL);
142*4882a593Smuzhiyun if (!buf)
143*4882a593Smuzhiyun return -ENOMEM;
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun *((struct vbg_ioctl_hdr *)buf) = hdr;
146*4882a593Smuzhiyun if (copy_from_user(buf + sizeof(hdr), (void *)arg + sizeof(hdr),
147*4882a593Smuzhiyun hdr.size_in - sizeof(hdr))) {
148*4882a593Smuzhiyun ret = -EFAULT;
149*4882a593Smuzhiyun goto out;
150*4882a593Smuzhiyun }
151*4882a593Smuzhiyun if (hdr.size_in < size)
152*4882a593Smuzhiyun memset(buf + hdr.size_in, 0, size - hdr.size_in);
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun ret = vbg_core_ioctl(session, req, buf);
155*4882a593Smuzhiyun if (ret)
156*4882a593Smuzhiyun goto out;
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun returned_size = ((struct vbg_ioctl_hdr *)buf)->size_out;
159*4882a593Smuzhiyun if (returned_size > size) {
160*4882a593Smuzhiyun vbg_debug("%s: too much output data %zu > %zu\n",
161*4882a593Smuzhiyun __func__, returned_size, size);
162*4882a593Smuzhiyun returned_size = size;
163*4882a593Smuzhiyun }
164*4882a593Smuzhiyun if (copy_to_user((void *)arg, buf, returned_size) != 0)
165*4882a593Smuzhiyun ret = -EFAULT;
166*4882a593Smuzhiyun
167*4882a593Smuzhiyun out:
168*4882a593Smuzhiyun if (is_vmmdev_req)
169*4882a593Smuzhiyun vbg_req_free(buf, size);
170*4882a593Smuzhiyun else
171*4882a593Smuzhiyun kfree(buf);
172*4882a593Smuzhiyun
173*4882a593Smuzhiyun return ret;
174*4882a593Smuzhiyun }
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun /** The file_operations structures. */
177*4882a593Smuzhiyun static const struct file_operations vbg_misc_device_fops = {
178*4882a593Smuzhiyun .owner = THIS_MODULE,
179*4882a593Smuzhiyun .open = vbg_misc_device_open,
180*4882a593Smuzhiyun .release = vbg_misc_device_close,
181*4882a593Smuzhiyun .unlocked_ioctl = vbg_misc_device_ioctl,
182*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
183*4882a593Smuzhiyun .compat_ioctl = vbg_misc_device_ioctl,
184*4882a593Smuzhiyun #endif
185*4882a593Smuzhiyun };
186*4882a593Smuzhiyun static const struct file_operations vbg_misc_device_user_fops = {
187*4882a593Smuzhiyun .owner = THIS_MODULE,
188*4882a593Smuzhiyun .open = vbg_misc_device_user_open,
189*4882a593Smuzhiyun .release = vbg_misc_device_close,
190*4882a593Smuzhiyun .unlocked_ioctl = vbg_misc_device_ioctl,
191*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
192*4882a593Smuzhiyun .compat_ioctl = vbg_misc_device_ioctl,
193*4882a593Smuzhiyun #endif
194*4882a593Smuzhiyun };
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun /**
197*4882a593Smuzhiyun * Called when the input device is first opened.
198*4882a593Smuzhiyun *
199*4882a593Smuzhiyun * Sets up absolute mouse reporting.
200*4882a593Smuzhiyun */
vbg_input_open(struct input_dev * input)201*4882a593Smuzhiyun static int vbg_input_open(struct input_dev *input)
202*4882a593Smuzhiyun {
203*4882a593Smuzhiyun struct vbg_dev *gdev = input_get_drvdata(input);
204*4882a593Smuzhiyun u32 feat = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_NEW_PROTOCOL;
205*4882a593Smuzhiyun
206*4882a593Smuzhiyun return vbg_core_set_mouse_status(gdev, feat);
207*4882a593Smuzhiyun }
208*4882a593Smuzhiyun
209*4882a593Smuzhiyun /**
210*4882a593Smuzhiyun * Called if all open handles to the input device are closed.
211*4882a593Smuzhiyun *
212*4882a593Smuzhiyun * Disables absolute reporting.
213*4882a593Smuzhiyun */
vbg_input_close(struct input_dev * input)214*4882a593Smuzhiyun static void vbg_input_close(struct input_dev *input)
215*4882a593Smuzhiyun {
216*4882a593Smuzhiyun struct vbg_dev *gdev = input_get_drvdata(input);
217*4882a593Smuzhiyun
218*4882a593Smuzhiyun vbg_core_set_mouse_status(gdev, 0);
219*4882a593Smuzhiyun }
220*4882a593Smuzhiyun
221*4882a593Smuzhiyun /**
222*4882a593Smuzhiyun * Creates the kernel input device.
223*4882a593Smuzhiyun *
224*4882a593Smuzhiyun * Return: 0 on success, negated errno on failure.
225*4882a593Smuzhiyun */
vbg_create_input_device(struct vbg_dev * gdev)226*4882a593Smuzhiyun static int vbg_create_input_device(struct vbg_dev *gdev)
227*4882a593Smuzhiyun {
228*4882a593Smuzhiyun struct input_dev *input;
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun input = devm_input_allocate_device(gdev->dev);
231*4882a593Smuzhiyun if (!input)
232*4882a593Smuzhiyun return -ENOMEM;
233*4882a593Smuzhiyun
234*4882a593Smuzhiyun input->id.bustype = BUS_PCI;
235*4882a593Smuzhiyun input->id.vendor = VBOX_VENDORID;
236*4882a593Smuzhiyun input->id.product = VMMDEV_DEVICEID;
237*4882a593Smuzhiyun input->open = vbg_input_open;
238*4882a593Smuzhiyun input->close = vbg_input_close;
239*4882a593Smuzhiyun input->dev.parent = gdev->dev;
240*4882a593Smuzhiyun input->name = "VirtualBox mouse integration";
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun input_set_abs_params(input, ABS_X, VMMDEV_MOUSE_RANGE_MIN,
243*4882a593Smuzhiyun VMMDEV_MOUSE_RANGE_MAX, 0, 0);
244*4882a593Smuzhiyun input_set_abs_params(input, ABS_Y, VMMDEV_MOUSE_RANGE_MIN,
245*4882a593Smuzhiyun VMMDEV_MOUSE_RANGE_MAX, 0, 0);
246*4882a593Smuzhiyun input_set_capability(input, EV_KEY, BTN_MOUSE);
247*4882a593Smuzhiyun input_set_drvdata(input, gdev);
248*4882a593Smuzhiyun
249*4882a593Smuzhiyun gdev->input = input;
250*4882a593Smuzhiyun
251*4882a593Smuzhiyun return input_register_device(gdev->input);
252*4882a593Smuzhiyun }
253*4882a593Smuzhiyun
host_version_show(struct device * dev,struct device_attribute * attr,char * buf)254*4882a593Smuzhiyun static ssize_t host_version_show(struct device *dev,
255*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
256*4882a593Smuzhiyun {
257*4882a593Smuzhiyun struct vbg_dev *gdev = dev_get_drvdata(dev);
258*4882a593Smuzhiyun
259*4882a593Smuzhiyun return sprintf(buf, "%s\n", gdev->host_version);
260*4882a593Smuzhiyun }
261*4882a593Smuzhiyun
host_features_show(struct device * dev,struct device_attribute * attr,char * buf)262*4882a593Smuzhiyun static ssize_t host_features_show(struct device *dev,
263*4882a593Smuzhiyun struct device_attribute *attr, char *buf)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun struct vbg_dev *gdev = dev_get_drvdata(dev);
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun return sprintf(buf, "%#x\n", gdev->host_features);
268*4882a593Smuzhiyun }
269*4882a593Smuzhiyun
270*4882a593Smuzhiyun static DEVICE_ATTR_RO(host_version);
271*4882a593Smuzhiyun static DEVICE_ATTR_RO(host_features);
272*4882a593Smuzhiyun
273*4882a593Smuzhiyun /**
274*4882a593Smuzhiyun * Does the PCI detection and init of the device.
275*4882a593Smuzhiyun *
276*4882a593Smuzhiyun * Return: 0 on success, negated errno on failure.
277*4882a593Smuzhiyun */
vbg_pci_probe(struct pci_dev * pci,const struct pci_device_id * id)278*4882a593Smuzhiyun static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
279*4882a593Smuzhiyun {
280*4882a593Smuzhiyun struct device *dev = &pci->dev;
281*4882a593Smuzhiyun resource_size_t io, io_len, mmio, mmio_len;
282*4882a593Smuzhiyun struct vmmdev_memory *vmmdev;
283*4882a593Smuzhiyun struct vbg_dev *gdev;
284*4882a593Smuzhiyun int ret;
285*4882a593Smuzhiyun
286*4882a593Smuzhiyun gdev = devm_kzalloc(dev, sizeof(*gdev), GFP_KERNEL);
287*4882a593Smuzhiyun if (!gdev)
288*4882a593Smuzhiyun return -ENOMEM;
289*4882a593Smuzhiyun
290*4882a593Smuzhiyun ret = pci_enable_device(pci);
291*4882a593Smuzhiyun if (ret != 0) {
292*4882a593Smuzhiyun vbg_err("vboxguest: Error enabling device: %d\n", ret);
293*4882a593Smuzhiyun return ret;
294*4882a593Smuzhiyun }
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun ret = -ENODEV;
297*4882a593Smuzhiyun
298*4882a593Smuzhiyun io = pci_resource_start(pci, 0);
299*4882a593Smuzhiyun io_len = pci_resource_len(pci, 0);
300*4882a593Smuzhiyun if (!io || !io_len) {
301*4882a593Smuzhiyun vbg_err("vboxguest: Error IO-port resource (0) is missing\n");
302*4882a593Smuzhiyun goto err_disable_pcidev;
303*4882a593Smuzhiyun }
304*4882a593Smuzhiyun if (devm_request_region(dev, io, io_len, DEVICE_NAME) == NULL) {
305*4882a593Smuzhiyun vbg_err("vboxguest: Error could not claim IO resource\n");
306*4882a593Smuzhiyun ret = -EBUSY;
307*4882a593Smuzhiyun goto err_disable_pcidev;
308*4882a593Smuzhiyun }
309*4882a593Smuzhiyun
310*4882a593Smuzhiyun mmio = pci_resource_start(pci, 1);
311*4882a593Smuzhiyun mmio_len = pci_resource_len(pci, 1);
312*4882a593Smuzhiyun if (!mmio || !mmio_len) {
313*4882a593Smuzhiyun vbg_err("vboxguest: Error MMIO resource (1) is missing\n");
314*4882a593Smuzhiyun goto err_disable_pcidev;
315*4882a593Smuzhiyun }
316*4882a593Smuzhiyun
317*4882a593Smuzhiyun if (devm_request_mem_region(dev, mmio, mmio_len, DEVICE_NAME) == NULL) {
318*4882a593Smuzhiyun vbg_err("vboxguest: Error could not claim MMIO resource\n");
319*4882a593Smuzhiyun ret = -EBUSY;
320*4882a593Smuzhiyun goto err_disable_pcidev;
321*4882a593Smuzhiyun }
322*4882a593Smuzhiyun
323*4882a593Smuzhiyun vmmdev = devm_ioremap(dev, mmio, mmio_len);
324*4882a593Smuzhiyun if (!vmmdev) {
325*4882a593Smuzhiyun vbg_err("vboxguest: Error ioremap failed; MMIO addr=%pap size=%pap\n",
326*4882a593Smuzhiyun &mmio, &mmio_len);
327*4882a593Smuzhiyun goto err_disable_pcidev;
328*4882a593Smuzhiyun }
329*4882a593Smuzhiyun
330*4882a593Smuzhiyun /* Validate MMIO region version and size. */
331*4882a593Smuzhiyun if (vmmdev->version != VMMDEV_MEMORY_VERSION ||
332*4882a593Smuzhiyun vmmdev->size < 32 || vmmdev->size > mmio_len) {
333*4882a593Smuzhiyun vbg_err("vboxguest: Bogus VMMDev memory; version=%08x (expected %08x) size=%d (expected <= %d)\n",
334*4882a593Smuzhiyun vmmdev->version, VMMDEV_MEMORY_VERSION,
335*4882a593Smuzhiyun vmmdev->size, (int)mmio_len);
336*4882a593Smuzhiyun goto err_disable_pcidev;
337*4882a593Smuzhiyun }
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun gdev->io_port = io;
340*4882a593Smuzhiyun gdev->mmio = vmmdev;
341*4882a593Smuzhiyun gdev->dev = dev;
342*4882a593Smuzhiyun gdev->misc_device.minor = MISC_DYNAMIC_MINOR;
343*4882a593Smuzhiyun gdev->misc_device.name = DEVICE_NAME;
344*4882a593Smuzhiyun gdev->misc_device.fops = &vbg_misc_device_fops;
345*4882a593Smuzhiyun gdev->misc_device_user.minor = MISC_DYNAMIC_MINOR;
346*4882a593Smuzhiyun gdev->misc_device_user.name = DEVICE_NAME_USER;
347*4882a593Smuzhiyun gdev->misc_device_user.fops = &vbg_misc_device_user_fops;
348*4882a593Smuzhiyun
349*4882a593Smuzhiyun ret = vbg_core_init(gdev, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
350*4882a593Smuzhiyun if (ret)
351*4882a593Smuzhiyun goto err_disable_pcidev;
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun ret = vbg_create_input_device(gdev);
354*4882a593Smuzhiyun if (ret) {
355*4882a593Smuzhiyun vbg_err("vboxguest: Error creating input device: %d\n", ret);
356*4882a593Smuzhiyun goto err_vbg_core_exit;
357*4882a593Smuzhiyun }
358*4882a593Smuzhiyun
359*4882a593Smuzhiyun ret = request_irq(pci->irq, vbg_core_isr, IRQF_SHARED, DEVICE_NAME,
360*4882a593Smuzhiyun gdev);
361*4882a593Smuzhiyun if (ret) {
362*4882a593Smuzhiyun vbg_err("vboxguest: Error requesting irq: %d\n", ret);
363*4882a593Smuzhiyun goto err_vbg_core_exit;
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun
366*4882a593Smuzhiyun ret = misc_register(&gdev->misc_device);
367*4882a593Smuzhiyun if (ret) {
368*4882a593Smuzhiyun vbg_err("vboxguest: Error misc_register %s failed: %d\n",
369*4882a593Smuzhiyun DEVICE_NAME, ret);
370*4882a593Smuzhiyun goto err_free_irq;
371*4882a593Smuzhiyun }
372*4882a593Smuzhiyun
373*4882a593Smuzhiyun ret = misc_register(&gdev->misc_device_user);
374*4882a593Smuzhiyun if (ret) {
375*4882a593Smuzhiyun vbg_err("vboxguest: Error misc_register %s failed: %d\n",
376*4882a593Smuzhiyun DEVICE_NAME_USER, ret);
377*4882a593Smuzhiyun goto err_unregister_misc_device;
378*4882a593Smuzhiyun }
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun mutex_lock(&vbg_gdev_mutex);
381*4882a593Smuzhiyun if (!vbg_gdev)
382*4882a593Smuzhiyun vbg_gdev = gdev;
383*4882a593Smuzhiyun else
384*4882a593Smuzhiyun ret = -EBUSY;
385*4882a593Smuzhiyun mutex_unlock(&vbg_gdev_mutex);
386*4882a593Smuzhiyun
387*4882a593Smuzhiyun if (ret) {
388*4882a593Smuzhiyun vbg_err("vboxguest: Error more then 1 vbox guest pci device\n");
389*4882a593Smuzhiyun goto err_unregister_misc_device_user;
390*4882a593Smuzhiyun }
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun pci_set_drvdata(pci, gdev);
393*4882a593Smuzhiyun device_create_file(dev, &dev_attr_host_version);
394*4882a593Smuzhiyun device_create_file(dev, &dev_attr_host_features);
395*4882a593Smuzhiyun
396*4882a593Smuzhiyun vbg_info("vboxguest: misc device minor %d, IRQ %d, I/O port %x, MMIO at %pap (size %pap)\n",
397*4882a593Smuzhiyun gdev->misc_device.minor, pci->irq, gdev->io_port,
398*4882a593Smuzhiyun &mmio, &mmio_len);
399*4882a593Smuzhiyun
400*4882a593Smuzhiyun return 0;
401*4882a593Smuzhiyun
402*4882a593Smuzhiyun err_unregister_misc_device_user:
403*4882a593Smuzhiyun misc_deregister(&gdev->misc_device_user);
404*4882a593Smuzhiyun err_unregister_misc_device:
405*4882a593Smuzhiyun misc_deregister(&gdev->misc_device);
406*4882a593Smuzhiyun err_free_irq:
407*4882a593Smuzhiyun free_irq(pci->irq, gdev);
408*4882a593Smuzhiyun err_vbg_core_exit:
409*4882a593Smuzhiyun vbg_core_exit(gdev);
410*4882a593Smuzhiyun err_disable_pcidev:
411*4882a593Smuzhiyun pci_disable_device(pci);
412*4882a593Smuzhiyun
413*4882a593Smuzhiyun return ret;
414*4882a593Smuzhiyun }
415*4882a593Smuzhiyun
/* Teardown: exact reverse of the probe path. */
static void vbg_pci_remove(struct pci_dev *pci)
{
	struct vbg_dev *gdev = pci_get_drvdata(pci);

	/*
	 * Clear the global first; the mutex also serializes against
	 * vbg_get_gdev(), which holds the lock while vboxsf uses the device.
	 */
	mutex_lock(&vbg_gdev_mutex);
	vbg_gdev = NULL;
	mutex_unlock(&vbg_gdev_mutex);

	free_irq(pci->irq, gdev);
	device_remove_file(gdev->dev, &dev_attr_host_features);
	device_remove_file(gdev->dev, &dev_attr_host_version);
	misc_deregister(&gdev->misc_device_user);
	misc_deregister(&gdev->misc_device);
	vbg_core_exit(gdev);
	pci_disable_device(pci);
}
432*4882a593Smuzhiyun
vbg_get_gdev(void)433*4882a593Smuzhiyun struct vbg_dev *vbg_get_gdev(void)
434*4882a593Smuzhiyun {
435*4882a593Smuzhiyun mutex_lock(&vbg_gdev_mutex);
436*4882a593Smuzhiyun
437*4882a593Smuzhiyun /*
438*4882a593Smuzhiyun * Note on success we keep the mutex locked until vbg_put_gdev(),
439*4882a593Smuzhiyun * this stops vbg_pci_remove from removing the device from underneath
440*4882a593Smuzhiyun * vboxsf. vboxsf will only hold a reference for a short while.
441*4882a593Smuzhiyun */
442*4882a593Smuzhiyun if (vbg_gdev)
443*4882a593Smuzhiyun return vbg_gdev;
444*4882a593Smuzhiyun
445*4882a593Smuzhiyun mutex_unlock(&vbg_gdev_mutex);
446*4882a593Smuzhiyun return ERR_PTR(-ENODEV);
447*4882a593Smuzhiyun }
448*4882a593Smuzhiyun EXPORT_SYMBOL(vbg_get_gdev);
449*4882a593Smuzhiyun
/* Release the device obtained with vbg_get_gdev(). */
void vbg_put_gdev(struct vbg_dev *gdev)
{
	/* Callers must pass back exactly what vbg_get_gdev() returned. */
	WARN_ON(gdev != vbg_gdev);
	/* Drops the lock held since vbg_get_gdev(), allowing removal again. */
	mutex_unlock(&vbg_gdev_mutex);
}
EXPORT_SYMBOL(vbg_put_gdev);
456*4882a593Smuzhiyun
457*4882a593Smuzhiyun /**
458*4882a593Smuzhiyun * Callback for mouse events.
459*4882a593Smuzhiyun *
460*4882a593Smuzhiyun * This is called at the end of the ISR, after leaving the event spinlock, if
461*4882a593Smuzhiyun * VMMDEV_EVENT_MOUSE_POSITION_CHANGED was raised by the host.
462*4882a593Smuzhiyun *
463*4882a593Smuzhiyun * @gdev: The device extension.
464*4882a593Smuzhiyun */
vbg_linux_mouse_event(struct vbg_dev * gdev)465*4882a593Smuzhiyun void vbg_linux_mouse_event(struct vbg_dev *gdev)
466*4882a593Smuzhiyun {
467*4882a593Smuzhiyun int rc;
468*4882a593Smuzhiyun
469*4882a593Smuzhiyun /* Report events to the kernel input device */
470*4882a593Smuzhiyun gdev->mouse_status_req->mouse_features = 0;
471*4882a593Smuzhiyun gdev->mouse_status_req->pointer_pos_x = 0;
472*4882a593Smuzhiyun gdev->mouse_status_req->pointer_pos_y = 0;
473*4882a593Smuzhiyun rc = vbg_req_perform(gdev, gdev->mouse_status_req);
474*4882a593Smuzhiyun if (rc >= 0) {
475*4882a593Smuzhiyun input_report_abs(gdev->input, ABS_X,
476*4882a593Smuzhiyun gdev->mouse_status_req->pointer_pos_x);
477*4882a593Smuzhiyun input_report_abs(gdev->input, ABS_Y,
478*4882a593Smuzhiyun gdev->mouse_status_req->pointer_pos_y);
479*4882a593Smuzhiyun input_sync(gdev->input);
480*4882a593Smuzhiyun }
481*4882a593Smuzhiyun }
482*4882a593Smuzhiyun
483*4882a593Smuzhiyun static const struct pci_device_id vbg_pci_ids[] = {
484*4882a593Smuzhiyun { .vendor = VBOX_VENDORID, .device = VMMDEV_DEVICEID },
485*4882a593Smuzhiyun {}
486*4882a593Smuzhiyun };
487*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, vbg_pci_ids);
488*4882a593Smuzhiyun
489*4882a593Smuzhiyun static struct pci_driver vbg_pci_driver = {
490*4882a593Smuzhiyun .name = DEVICE_NAME,
491*4882a593Smuzhiyun .id_table = vbg_pci_ids,
492*4882a593Smuzhiyun .probe = vbg_pci_probe,
493*4882a593Smuzhiyun .remove = vbg_pci_remove,
494*4882a593Smuzhiyun };
495*4882a593Smuzhiyun
496*4882a593Smuzhiyun module_pci_driver(vbg_pci_driver);
497*4882a593Smuzhiyun
498*4882a593Smuzhiyun MODULE_AUTHOR("Oracle Corporation");
499*4882a593Smuzhiyun MODULE_DESCRIPTION("Oracle VM VirtualBox Guest Additions for Linux Module");
500*4882a593Smuzhiyun MODULE_LICENSE("GPL");
501