// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel/userspace transport abstraction for Hyper-V util driver.
 *
 * Copyright (C) 2015, Vitaly Kuznetsov <vkuznets@redhat.com>
 */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/slab.h>
9*4882a593Smuzhiyun #include <linux/fs.h>
10*4882a593Smuzhiyun #include <linux/poll.h>
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include "hyperv_vmbus.h"
13*4882a593Smuzhiyun #include "hv_utils_transport.h"
14*4882a593Smuzhiyun
/* Protects hvt_list. */
static DEFINE_SPINLOCK(hvt_list_lock);
/* All live transports; searched by hvt_cn_callback() on incoming messages. */
static struct list_head hvt_list = LIST_HEAD_INIT(hvt_list);
17*4882a593Smuzhiyun
hvt_reset(struct hvutil_transport * hvt)18*4882a593Smuzhiyun static void hvt_reset(struct hvutil_transport *hvt)
19*4882a593Smuzhiyun {
20*4882a593Smuzhiyun kfree(hvt->outmsg);
21*4882a593Smuzhiyun hvt->outmsg = NULL;
22*4882a593Smuzhiyun hvt->outmsg_len = 0;
23*4882a593Smuzhiyun if (hvt->on_reset)
24*4882a593Smuzhiyun hvt->on_reset();
25*4882a593Smuzhiyun }
26*4882a593Smuzhiyun
/*
 * Hand the queued outgoing message to the userspace daemon.
 *
 * Blocks until a message is queued or the transport leaves CHARDEV mode.
 * Returns the message length on success, -EINTR if the wait was
 * interrupted, -EBADF once the transport is being destroyed, -EAGAIN if
 * woken with no message pending, -EINVAL if the caller's buffer is too
 * small, or -EFAULT on copy_to_user() failure.  The queued message is
 * consumed (freed) even when the copy to userspace fails.
 */
static ssize_t hvt_op_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	struct hvutil_transport *hvt;
	int ret;

	/* file->f_op points at the fops embedded in our transport struct. */
	hvt = container_of(file->f_op, struct hvutil_transport, fops);

	if (wait_event_interruptible(hvt->outmsg_q, hvt->outmsg_len > 0 ||
				     hvt->mode != HVUTIL_TRANSPORT_CHARDEV))
		return -EINTR;

	mutex_lock(&hvt->lock);

	if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
		ret = -EBADF;
		goto out_unlock;
	}

	if (!hvt->outmsg) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	/* Partial reads are not supported: the buffer must fit it all. */
	if (count < hvt->outmsg_len) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!copy_to_user(buf, hvt->outmsg, hvt->outmsg_len))
		ret = hvt->outmsg_len;
	else
		ret = -EFAULT;

	/* The message is consumed regardless of the copy result. */
	kfree(hvt->outmsg);
	hvt->outmsg = NULL;
	hvt->outmsg_len = 0;

	/* One-shot notification that the message was picked up. */
	if (hvt->on_read)
		hvt->on_read();
	hvt->on_read = NULL;

out_unlock:
	mutex_unlock(&hvt->lock);
	return ret;
}
73*4882a593Smuzhiyun
/*
 * Accept a message from the userspace daemon and feed it to the util
 * driver's on_msg() handler.
 *
 * Returns 'count' on success, a memdup_user() error code, -EBADF when
 * the transport is being destroyed, or the (non-zero) error returned by
 * on_msg().
 */
static ssize_t hvt_op_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	struct hvutil_transport *hvt;
	u8 *inmsg;
	int ret;

	hvt = container_of(file->f_op, struct hvutil_transport, fops);

	/* Pull the whole write into a kernel buffer. */
	inmsg = memdup_user(buf, count);
	if (IS_ERR(inmsg))
		return PTR_ERR(inmsg);

	/*
	 * NOTE(review): hvt->mode is read here without taking hvt->lock,
	 * unlike hvt_op_read() -- presumably a benign race with the
	 * destroy path; confirm against hvutil_transport_destroy().
	 */
	if (hvt->mode == HVUTIL_TRANSPORT_DESTROY)
		ret = -EBADF;
	else
		ret = hvt->on_msg(inmsg, count);

	kfree(inmsg);

	return ret ? ret : count;
}
96*4882a593Smuzhiyun
hvt_op_poll(struct file * file,poll_table * wait)97*4882a593Smuzhiyun static __poll_t hvt_op_poll(struct file *file, poll_table *wait)
98*4882a593Smuzhiyun {
99*4882a593Smuzhiyun struct hvutil_transport *hvt;
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun hvt = container_of(file->f_op, struct hvutil_transport, fops);
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun poll_wait(file, &hvt->outmsg_q, wait);
104*4882a593Smuzhiyun
105*4882a593Smuzhiyun if (hvt->mode == HVUTIL_TRANSPORT_DESTROY)
106*4882a593Smuzhiyun return EPOLLERR | EPOLLHUP;
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun if (hvt->outmsg_len > 0)
109*4882a593Smuzhiyun return EPOLLIN | EPOLLRDNORM;
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun return 0;
112*4882a593Smuzhiyun }
113*4882a593Smuzhiyun
hvt_op_open(struct inode * inode,struct file * file)114*4882a593Smuzhiyun static int hvt_op_open(struct inode *inode, struct file *file)
115*4882a593Smuzhiyun {
116*4882a593Smuzhiyun struct hvutil_transport *hvt;
117*4882a593Smuzhiyun int ret = 0;
118*4882a593Smuzhiyun bool issue_reset = false;
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun hvt = container_of(file->f_op, struct hvutil_transport, fops);
121*4882a593Smuzhiyun
122*4882a593Smuzhiyun mutex_lock(&hvt->lock);
123*4882a593Smuzhiyun
124*4882a593Smuzhiyun if (hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
125*4882a593Smuzhiyun ret = -EBADF;
126*4882a593Smuzhiyun } else if (hvt->mode == HVUTIL_TRANSPORT_INIT) {
127*4882a593Smuzhiyun /*
128*4882a593Smuzhiyun * Switching to CHARDEV mode. We switch bach to INIT when
129*4882a593Smuzhiyun * device gets released.
130*4882a593Smuzhiyun */
131*4882a593Smuzhiyun hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
134*4882a593Smuzhiyun /*
135*4882a593Smuzhiyun * We're switching from netlink communication to using char
136*4882a593Smuzhiyun * device. Issue the reset first.
137*4882a593Smuzhiyun */
138*4882a593Smuzhiyun issue_reset = true;
139*4882a593Smuzhiyun hvt->mode = HVUTIL_TRANSPORT_CHARDEV;
140*4882a593Smuzhiyun } else {
141*4882a593Smuzhiyun ret = -EBUSY;
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun
144*4882a593Smuzhiyun if (issue_reset)
145*4882a593Smuzhiyun hvt_reset(hvt);
146*4882a593Smuzhiyun
147*4882a593Smuzhiyun mutex_unlock(&hvt->lock);
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun return ret;
150*4882a593Smuzhiyun }
151*4882a593Smuzhiyun
/*
 * Final teardown: unregister the misc device, then free the pending
 * message buffer (if any) and the transport itself.  Must only run once
 * no opener remains (see hvutil_transport_destroy()).
 */
static void hvt_transport_free(struct hvutil_transport *hvt)
{
	misc_deregister(&hvt->mdev);
	kfree(hvt->outmsg);
	kfree(hvt);
}
158*4882a593Smuzhiyun
/*
 * Char device close: return the transport to INIT mode (unless it is
 * being destroyed), discard pending state, and signal
 * hvutil_transport_destroy() if it is waiting for this release.
 */
static int hvt_op_release(struct inode *inode, struct file *file)
{
	struct hvutil_transport *hvt;
	int mode_old;

	hvt = container_of(file->f_op, struct hvutil_transport, fops);

	mutex_lock(&hvt->lock);
	mode_old = hvt->mode;
	if (hvt->mode != HVUTIL_TRANSPORT_DESTROY)
		hvt->mode = HVUTIL_TRANSPORT_INIT;
	/*
	 * Cleanup message buffers to avoid spurious messages when the daemon
	 * connects back.
	 */
	hvt_reset(hvt);

	/* Unblock hvutil_transport_destroy()'s wait_for_completion(). */
	if (mode_old == HVUTIL_TRANSPORT_DESTROY)
		complete(&hvt->release);

	mutex_unlock(&hvt->lock);

	return 0;
}
183*4882a593Smuzhiyun
hvt_cn_callback(struct cn_msg * msg,struct netlink_skb_parms * nsp)184*4882a593Smuzhiyun static void hvt_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
185*4882a593Smuzhiyun {
186*4882a593Smuzhiyun struct hvutil_transport *hvt, *hvt_found = NULL;
187*4882a593Smuzhiyun
188*4882a593Smuzhiyun spin_lock(&hvt_list_lock);
189*4882a593Smuzhiyun list_for_each_entry(hvt, &hvt_list, list) {
190*4882a593Smuzhiyun if (hvt->cn_id.idx == msg->id.idx &&
191*4882a593Smuzhiyun hvt->cn_id.val == msg->id.val) {
192*4882a593Smuzhiyun hvt_found = hvt;
193*4882a593Smuzhiyun break;
194*4882a593Smuzhiyun }
195*4882a593Smuzhiyun }
196*4882a593Smuzhiyun spin_unlock(&hvt_list_lock);
197*4882a593Smuzhiyun if (!hvt_found) {
198*4882a593Smuzhiyun pr_warn("hvt_cn_callback: spurious message received!\n");
199*4882a593Smuzhiyun return;
200*4882a593Smuzhiyun }
201*4882a593Smuzhiyun
202*4882a593Smuzhiyun /*
203*4882a593Smuzhiyun * Switching to NETLINK mode. Switching to CHARDEV happens when someone
204*4882a593Smuzhiyun * opens the device.
205*4882a593Smuzhiyun */
206*4882a593Smuzhiyun mutex_lock(&hvt->lock);
207*4882a593Smuzhiyun if (hvt->mode == HVUTIL_TRANSPORT_INIT)
208*4882a593Smuzhiyun hvt->mode = HVUTIL_TRANSPORT_NETLINK;
209*4882a593Smuzhiyun
210*4882a593Smuzhiyun if (hvt->mode == HVUTIL_TRANSPORT_NETLINK)
211*4882a593Smuzhiyun hvt_found->on_msg(msg->data, msg->len);
212*4882a593Smuzhiyun else
213*4882a593Smuzhiyun pr_warn("hvt_cn_callback: unexpected netlink message!\n");
214*4882a593Smuzhiyun mutex_unlock(&hvt->lock);
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun
/*
 * Send a message to the userspace daemon over the currently active
 * transport.
 *
 * In NETLINK mode the message is sent immediately via the connector and
 * on_read_cb() (if any) is invoked right away since netlink delivery is
 * fire-and-forget.  In CHARDEV mode the message is queued for
 * hvt_op_read() to pick up and on_read_cb() is deferred until the
 * daemon actually reads it; only one message may be in flight at a time
 * (-EFAULT if the previous one wasn't consumed yet).
 *
 * Returns 0 on success, -EINVAL in INIT/DESTROY mode, -ENOMEM on
 * allocation failure, or a cn_netlink_send() error.
 */
int hvutil_transport_send(struct hvutil_transport *hvt, void *msg, int len,
			  void (*on_read_cb)(void))
{
	struct cn_msg *cn_msg;
	int ret = 0;

	/*
	 * NOTE(review): the mode checks below run without hvt->lock;
	 * presumably mode transitions racing with send are benign here --
	 * the CHARDEV path re-checks under the lock.
	 */
	if (hvt->mode == HVUTIL_TRANSPORT_INIT ||
	    hvt->mode == HVUTIL_TRANSPORT_DESTROY) {
		return -EINVAL;
	} else if (hvt->mode == HVUTIL_TRANSPORT_NETLINK) {
		/* GFP_ATOMIC: this path may be called from atomic context. */
		cn_msg = kzalloc(sizeof(*cn_msg) + len, GFP_ATOMIC);
		if (!cn_msg)
			return -ENOMEM;
		cn_msg->id.idx = hvt->cn_id.idx;
		cn_msg->id.val = hvt->cn_id.val;
		cn_msg->len = len;
		memcpy(cn_msg->data, msg, len);
		ret = cn_netlink_send(cn_msg, 0, 0, GFP_ATOMIC);
		kfree(cn_msg);
		/*
		 * We don't know when netlink messages are delivered but unlike
		 * in CHARDEV mode we're not blocked and we can send next
		 * messages right away.
		 */
		if (on_read_cb)
			on_read_cb();
		return ret;
	}
	/* HVUTIL_TRANSPORT_CHARDEV */
	mutex_lock(&hvt->lock);
	if (hvt->mode != HVUTIL_TRANSPORT_CHARDEV) {
		ret = -EINVAL;
		goto out_unlock;
	}

	if (hvt->outmsg) {
		/* Previous message wasn't received */
		ret = -EFAULT;
		goto out_unlock;
	}
	hvt->outmsg = kzalloc(len, GFP_KERNEL);
	if (hvt->outmsg) {
		memcpy(hvt->outmsg, msg, len);
		hvt->outmsg_len = len;
		/* Invoked (once) by hvt_op_read() when the daemon reads. */
		hvt->on_read = on_read_cb;
		wake_up_interruptible(&hvt->outmsg_q);
	} else
		ret = -ENOMEM;
out_unlock:
	mutex_unlock(&hvt->lock);
	return ret;
}
269*4882a593Smuzhiyun
/*
 * Create a transport: register the misc char device and, when
 * cn_idx/cn_val are both non-zero, the netlink connector callback.
 *
 * @name:     misc device (and connector callback) name.
 * @cn_idx:   connector id index; 0 disables the netlink path.
 * @cn_val:   connector id value; 0 disables the netlink path.
 * @on_msg:   invoked with each message received from the daemon.
 * @on_reset: invoked when the daemon (re)connects and state must reset.
 *
 * Returns the new transport or NULL on failure.  On failure all
 * partially-created resources are rolled back.
 */
struct hvutil_transport *hvutil_transport_init(const char *name,
					       u32 cn_idx, u32 cn_val,
					       int (*on_msg)(void *, int),
					       void (*on_reset)(void))
{
	struct hvutil_transport *hvt;

	hvt = kzalloc(sizeof(*hvt), GFP_KERNEL);
	if (!hvt)
		return NULL;

	hvt->cn_id.idx = cn_idx;
	hvt->cn_id.val = cn_val;

	hvt->mdev.minor = MISC_DYNAMIC_MINOR;
	hvt->mdev.name = name;

	hvt->fops.owner = THIS_MODULE;
	hvt->fops.read = hvt_op_read;
	hvt->fops.write = hvt_op_write;
	hvt->fops.poll = hvt_op_poll;
	hvt->fops.open = hvt_op_open;
	hvt->fops.release = hvt_op_release;

	hvt->mdev.fops = &hvt->fops;

	init_waitqueue_head(&hvt->outmsg_q);
	mutex_init(&hvt->lock);
	init_completion(&hvt->release);

	/* Make the transport findable by hvt_cn_callback(). */
	spin_lock(&hvt_list_lock);
	list_add(&hvt->list, &hvt_list);
	spin_unlock(&hvt_list_lock);

	hvt->on_msg = on_msg;
	hvt->on_reset = on_reset;

	if (misc_register(&hvt->mdev))
		goto err_free_hvt;

	/* Use cn_id.idx/cn_id.val to determine if we need to setup netlink */
	if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0 &&
	    cn_add_callback(&hvt->cn_id, name, hvt_cn_callback))
		goto err_deregister;

	return hvt;

err_deregister:
	/*
	 * Undo the successful misc_register(); the original code freed
	 * hvt while the miscdevice was still registered, leaving a
	 * dangling registration.
	 */
	misc_deregister(&hvt->mdev);
err_free_hvt:
	spin_lock(&hvt_list_lock);
	list_del(&hvt->list);
	spin_unlock(&hvt_list_lock);
	kfree(hvt);
	return NULL;
}
324*4882a593Smuzhiyun
/*
 * Tear down a transport created by hvutil_transport_init().
 *
 * Flips the mode to DESTROY (waking any blocked reader), unlinks the
 * transport from hvt_list and removes the netlink callback.  If a
 * daemon still held the char device open at that moment, freeing is
 * deferred until hvt_op_release() signals hvt->release; otherwise the
 * transport is freed immediately.
 */
void hvutil_transport_destroy(struct hvutil_transport *hvt)
{
	int mode_old;

	mutex_lock(&hvt->lock);
	mode_old = hvt->mode;
	hvt->mode = HVUTIL_TRANSPORT_DESTROY;
	/* Kick hvt_op_read() out of its wait so it can return -EBADF. */
	wake_up_interruptible(&hvt->outmsg_q);
	mutex_unlock(&hvt->lock);

	/*
	 * In case we were in 'chardev' mode we still have an open fd so we
	 * have to defer freeing the device. Netlink interface can be freed
	 * now.
	 */
	spin_lock(&hvt_list_lock);
	list_del(&hvt->list);
	spin_unlock(&hvt_list_lock);
	if (hvt->cn_id.idx > 0 && hvt->cn_id.val > 0)
		cn_del_callback(&hvt->cn_id);

	if (mode_old == HVUTIL_TRANSPORT_CHARDEV)
		wait_for_completion(&hvt->release);

	hvt_transport_free(hvt);
}
351