xref: /OK3568_Linux_fs/kernel/drivers/hv/hv_snapshot.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * An implementation of host initiated guest snapshot.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2013, Microsoft, Inc.
6*4882a593Smuzhiyun  * Author : K. Y. Srinivasan <kys@microsoft.com>
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/net.h>
11*4882a593Smuzhiyun #include <linux/nls.h>
12*4882a593Smuzhiyun #include <linux/connector.h>
13*4882a593Smuzhiyun #include <linux/workqueue.h>
14*4882a593Smuzhiyun #include <linux/hyperv.h>
15*4882a593Smuzhiyun #include <asm/hyperv-tlfs.h>
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #include "hyperv_vmbus.h"
18*4882a593Smuzhiyun #include "hv_utils_transport.h"
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #define VSS_MAJOR  5
21*4882a593Smuzhiyun #define VSS_MINOR  0
22*4882a593Smuzhiyun #define VSS_VERSION    (VSS_MAJOR << 16 | VSS_MINOR)
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun #define VSS_VER_COUNT 1
25*4882a593Smuzhiyun static const int vss_versions[] = {
26*4882a593Smuzhiyun 	VSS_VERSION
27*4882a593Smuzhiyun };
28*4882a593Smuzhiyun 
29*4882a593Smuzhiyun #define FW_VER_COUNT 1
30*4882a593Smuzhiyun static const int fw_versions[] = {
31*4882a593Smuzhiyun 	UTIL_FW_VERSION
32*4882a593Smuzhiyun };
33*4882a593Smuzhiyun 
34*4882a593Smuzhiyun /*
 * Timeout values are based on expectations from the host
36*4882a593Smuzhiyun  */
37*4882a593Smuzhiyun #define VSS_FREEZE_TIMEOUT (15 * 60)
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun /*
40*4882a593Smuzhiyun  * Global state maintained for transaction that is being processed. For a class
41*4882a593Smuzhiyun  * of integration services, including the "VSS service", the specified protocol
42*4882a593Smuzhiyun  * is a "request/response" protocol which means that there can only be single
43*4882a593Smuzhiyun  * outstanding transaction from the host at any given point in time. We use
44*4882a593Smuzhiyun  * this to simplify memory management in this driver - we cache and process
45*4882a593Smuzhiyun  * only one message at a time.
46*4882a593Smuzhiyun  *
47*4882a593Smuzhiyun  * While the request/response protocol is guaranteed by the host, we further
48*4882a593Smuzhiyun  * ensure this by serializing packet processing in this driver - we do not
49*4882a593Smuzhiyun  * read additional packets from the VMBUs until the current packet is fully
50*4882a593Smuzhiyun  * handled.
51*4882a593Smuzhiyun  */
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun static struct {
54*4882a593Smuzhiyun 	int state;   /* hvutil_device_state */
55*4882a593Smuzhiyun 	int recv_len; /* number of bytes received. */
56*4882a593Smuzhiyun 	struct vmbus_channel *recv_channel; /* chn we got the request */
57*4882a593Smuzhiyun 	u64 recv_req_id; /* request ID. */
58*4882a593Smuzhiyun 	struct hv_vss_msg  *msg; /* current message */
59*4882a593Smuzhiyun } vss_transaction;
60*4882a593Smuzhiyun 
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun static void vss_respond_to_host(int error);
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun /*
65*4882a593Smuzhiyun  * This state maintains the version number registered by the daemon.
66*4882a593Smuzhiyun  */
67*4882a593Smuzhiyun static int dm_reg_value;
68*4882a593Smuzhiyun 
69*4882a593Smuzhiyun static const char vss_devname[] = "vmbus/hv_vss";
70*4882a593Smuzhiyun static __u8 *recv_buffer;
71*4882a593Smuzhiyun static struct hvutil_transport *hvt;
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun static void vss_timeout_func(struct work_struct *dummy);
74*4882a593Smuzhiyun static void vss_handle_request(struct work_struct *dummy);
75*4882a593Smuzhiyun 
76*4882a593Smuzhiyun static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
77*4882a593Smuzhiyun static DECLARE_WORK(vss_handle_request_work, vss_handle_request);
78*4882a593Smuzhiyun 
vss_poll_wrapper(void * channel)79*4882a593Smuzhiyun static void vss_poll_wrapper(void *channel)
80*4882a593Smuzhiyun {
81*4882a593Smuzhiyun 	/* Transaction is finished, reset the state here to avoid races. */
82*4882a593Smuzhiyun 	vss_transaction.state = HVUTIL_READY;
83*4882a593Smuzhiyun 	tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
84*4882a593Smuzhiyun }
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun /*
87*4882a593Smuzhiyun  * Callback when data is received from user mode.
88*4882a593Smuzhiyun  */
89*4882a593Smuzhiyun 
vss_timeout_func(struct work_struct * dummy)90*4882a593Smuzhiyun static void vss_timeout_func(struct work_struct *dummy)
91*4882a593Smuzhiyun {
92*4882a593Smuzhiyun 	/*
93*4882a593Smuzhiyun 	 * Timeout waiting for userspace component to reply happened.
94*4882a593Smuzhiyun 	 */
95*4882a593Smuzhiyun 	pr_warn("VSS: timeout waiting for daemon to reply\n");
96*4882a593Smuzhiyun 	vss_respond_to_host(HV_E_FAIL);
97*4882a593Smuzhiyun 
98*4882a593Smuzhiyun 	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun 
vss_register_done(void)101*4882a593Smuzhiyun static void vss_register_done(void)
102*4882a593Smuzhiyun {
103*4882a593Smuzhiyun 	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
104*4882a593Smuzhiyun 	pr_debug("VSS: userspace daemon registered\n");
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun 
vss_handle_handshake(struct hv_vss_msg * vss_msg)107*4882a593Smuzhiyun static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun 	u32 our_ver = VSS_OP_REGISTER1;
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun 	switch (vss_msg->vss_hdr.operation) {
112*4882a593Smuzhiyun 	case VSS_OP_REGISTER:
113*4882a593Smuzhiyun 		/* Daemon doesn't expect us to reply */
114*4882a593Smuzhiyun 		dm_reg_value = VSS_OP_REGISTER;
115*4882a593Smuzhiyun 		break;
116*4882a593Smuzhiyun 	case VSS_OP_REGISTER1:
117*4882a593Smuzhiyun 		/* Daemon expects us to reply with our own version */
118*4882a593Smuzhiyun 		if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
119*4882a593Smuzhiyun 					  vss_register_done))
120*4882a593Smuzhiyun 			return -EFAULT;
121*4882a593Smuzhiyun 		dm_reg_value = VSS_OP_REGISTER1;
122*4882a593Smuzhiyun 		break;
123*4882a593Smuzhiyun 	default:
124*4882a593Smuzhiyun 		return -EINVAL;
125*4882a593Smuzhiyun 	}
126*4882a593Smuzhiyun 	pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
127*4882a593Smuzhiyun 	return 0;
128*4882a593Smuzhiyun }
129*4882a593Smuzhiyun 
/*
 * Callback invoked by the transport when the userspace daemon writes to
 * the device: either a registration handshake or the reply to a request
 * we previously forwarded.
 *
 * Returns 0 on success, -EINVAL on size/state protocol violations,
 * or the result of vss_handle_handshake() for registration messages.
 */
static int vss_on_msg(void *msg, int len)
{
	struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;

	/* The daemon must always write exactly one complete hv_vss_msg. */
	if (len != sizeof(*vss_msg)) {
		pr_debug("VSS: Message size does not match length\n");
		return -EINVAL;
	}

	if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
	    vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
		/*
		 * Don't process registration messages if we're in the middle
		 * of a transaction processing.
		 */
		if (vss_transaction.state > HVUTIL_READY) {
			pr_debug("VSS: Got unexpected registration request\n");
			return -EINVAL;
		}

		return vss_handle_handshake(vss_msg);
	} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
		/* This is the daemon's reply to the request we forwarded. */
		vss_transaction.state = HVUTIL_USERSPACE_RECV;

		/*
		 * For HOT_BACKUP replies, set VSS_HBU_NO_AUTO_RECOVERY in
		 * the check-flags field of the message returned to the host.
		 */
		if (vss_msg->vss_hdr.operation == VSS_OP_HOT_BACKUP)
			vss_transaction.msg->vss_cf.flags =
				VSS_HBU_NO_AUTO_RECOVERY;

		/*
		 * Respond only if the timeout handler has not beaten us to
		 * it: cancel_delayed_work_sync() returns true iff the timeout
		 * work was still pending, so exactly one of the two paths
		 * answers the host.
		 */
		if (cancel_delayed_work_sync(&vss_timeout_work)) {
			vss_respond_to_host(vss_msg->error);
			/* Transaction is finished, reset the state. */
			hv_poll_channel(vss_transaction.recv_channel,
					vss_poll_wrapper);
		}
	} else {
		/* This is a spurious call! */
		pr_debug("VSS: Transaction not active\n");
		return -EINVAL;
	}
	return 0;
}
171*4882a593Smuzhiyun 
vss_send_op(void)172*4882a593Smuzhiyun static void vss_send_op(void)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun 	int op = vss_transaction.msg->vss_hdr.operation;
175*4882a593Smuzhiyun 	int rc;
176*4882a593Smuzhiyun 	struct hv_vss_msg *vss_msg;
177*4882a593Smuzhiyun 
178*4882a593Smuzhiyun 	/* The transaction state is wrong. */
179*4882a593Smuzhiyun 	if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) {
180*4882a593Smuzhiyun 		pr_debug("VSS: Unexpected attempt to send to daemon\n");
181*4882a593Smuzhiyun 		return;
182*4882a593Smuzhiyun 	}
183*4882a593Smuzhiyun 
184*4882a593Smuzhiyun 	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
185*4882a593Smuzhiyun 	if (!vss_msg)
186*4882a593Smuzhiyun 		return;
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	vss_msg->vss_hdr.operation = op;
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun 	vss_transaction.state = HVUTIL_USERSPACE_REQ;
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
193*4882a593Smuzhiyun 			VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);
194*4882a593Smuzhiyun 
195*4882a593Smuzhiyun 	rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
196*4882a593Smuzhiyun 	if (rc) {
197*4882a593Smuzhiyun 		pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
198*4882a593Smuzhiyun 		if (cancel_delayed_work_sync(&vss_timeout_work)) {
199*4882a593Smuzhiyun 			vss_respond_to_host(HV_E_FAIL);
200*4882a593Smuzhiyun 			vss_transaction.state = HVUTIL_READY;
201*4882a593Smuzhiyun 		}
202*4882a593Smuzhiyun 	}
203*4882a593Smuzhiyun 
204*4882a593Smuzhiyun 	kfree(vss_msg);
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun 
vss_handle_request(struct work_struct * dummy)207*4882a593Smuzhiyun static void vss_handle_request(struct work_struct *dummy)
208*4882a593Smuzhiyun {
209*4882a593Smuzhiyun 	switch (vss_transaction.msg->vss_hdr.operation) {
210*4882a593Smuzhiyun 	/*
211*4882a593Smuzhiyun 	 * Initiate a "freeze/thaw" operation in the guest.
212*4882a593Smuzhiyun 	 * We respond to the host once the operation is complete.
213*4882a593Smuzhiyun 	 *
214*4882a593Smuzhiyun 	 * We send the message to the user space daemon and the operation is
215*4882a593Smuzhiyun 	 * performed in the daemon.
216*4882a593Smuzhiyun 	 */
217*4882a593Smuzhiyun 	case VSS_OP_THAW:
218*4882a593Smuzhiyun 	case VSS_OP_FREEZE:
219*4882a593Smuzhiyun 	case VSS_OP_HOT_BACKUP:
220*4882a593Smuzhiyun 		if (vss_transaction.state < HVUTIL_READY) {
221*4882a593Smuzhiyun 			/* Userspace is not registered yet */
222*4882a593Smuzhiyun 			pr_debug("VSS: Not ready for request.\n");
223*4882a593Smuzhiyun 			vss_respond_to_host(HV_E_FAIL);
224*4882a593Smuzhiyun 			return;
225*4882a593Smuzhiyun 		}
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 		pr_debug("VSS: Received request for op code: %d\n",
228*4882a593Smuzhiyun 			vss_transaction.msg->vss_hdr.operation);
229*4882a593Smuzhiyun 		vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
230*4882a593Smuzhiyun 		vss_send_op();
231*4882a593Smuzhiyun 		return;
232*4882a593Smuzhiyun 	case VSS_OP_GET_DM_INFO:
233*4882a593Smuzhiyun 		vss_transaction.msg->dm_info.flags = 0;
234*4882a593Smuzhiyun 		break;
235*4882a593Smuzhiyun 	default:
236*4882a593Smuzhiyun 		break;
237*4882a593Smuzhiyun 	}
238*4882a593Smuzhiyun 
239*4882a593Smuzhiyun 	vss_respond_to_host(0);
240*4882a593Smuzhiyun 	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
241*4882a593Smuzhiyun }
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun /*
244*4882a593Smuzhiyun  * Send a response back to the host.
245*4882a593Smuzhiyun  */
246*4882a593Smuzhiyun 
247*4882a593Smuzhiyun static void
vss_respond_to_host(int error)248*4882a593Smuzhiyun vss_respond_to_host(int error)
249*4882a593Smuzhiyun {
250*4882a593Smuzhiyun 	struct icmsg_hdr *icmsghdrp;
251*4882a593Smuzhiyun 	u32	buf_len;
252*4882a593Smuzhiyun 	struct vmbus_channel *channel;
253*4882a593Smuzhiyun 	u64	req_id;
254*4882a593Smuzhiyun 
255*4882a593Smuzhiyun 	/*
256*4882a593Smuzhiyun 	 * Copy the global state for completing the transaction. Note that
257*4882a593Smuzhiyun 	 * only one transaction can be active at a time.
258*4882a593Smuzhiyun 	 */
259*4882a593Smuzhiyun 
260*4882a593Smuzhiyun 	buf_len = vss_transaction.recv_len;
261*4882a593Smuzhiyun 	channel = vss_transaction.recv_channel;
262*4882a593Smuzhiyun 	req_id = vss_transaction.recv_req_id;
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun 	icmsghdrp = (struct icmsg_hdr *)
265*4882a593Smuzhiyun 			&recv_buffer[sizeof(struct vmbuspipe_hdr)];
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun 	if (channel->onchannel_callback == NULL)
268*4882a593Smuzhiyun 		/*
269*4882a593Smuzhiyun 		 * We have raced with util driver being unloaded;
270*4882a593Smuzhiyun 		 * silently return.
271*4882a593Smuzhiyun 		 */
272*4882a593Smuzhiyun 		return;
273*4882a593Smuzhiyun 
274*4882a593Smuzhiyun 	icmsghdrp->status = error;
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
277*4882a593Smuzhiyun 
278*4882a593Smuzhiyun 	vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
279*4882a593Smuzhiyun 				VM_PKT_DATA_INBAND, 0);
280*4882a593Smuzhiyun 
281*4882a593Smuzhiyun }
282*4882a593Smuzhiyun 
283*4882a593Smuzhiyun /*
284*4882a593Smuzhiyun  * This callback is invoked when we get a VSS message from the host.
285*4882a593Smuzhiyun  * The host ensures that only one VSS transaction can be active at a time.
286*4882a593Smuzhiyun  */
287*4882a593Smuzhiyun 
hv_vss_onchannelcallback(void * context)288*4882a593Smuzhiyun void hv_vss_onchannelcallback(void *context)
289*4882a593Smuzhiyun {
290*4882a593Smuzhiyun 	struct vmbus_channel *channel = context;
291*4882a593Smuzhiyun 	u32 recvlen;
292*4882a593Smuzhiyun 	u64 requestid;
293*4882a593Smuzhiyun 	struct hv_vss_msg *vss_msg;
294*4882a593Smuzhiyun 	int vss_srv_version;
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun 	struct icmsg_hdr *icmsghdrp;
297*4882a593Smuzhiyun 
298*4882a593Smuzhiyun 	if (vss_transaction.state > HVUTIL_READY)
299*4882a593Smuzhiyun 		return;
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen,
302*4882a593Smuzhiyun 			 &requestid);
303*4882a593Smuzhiyun 
304*4882a593Smuzhiyun 	if (recvlen > 0) {
305*4882a593Smuzhiyun 		icmsghdrp = (struct icmsg_hdr *)&recv_buffer[
306*4882a593Smuzhiyun 			sizeof(struct vmbuspipe_hdr)];
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 		if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
309*4882a593Smuzhiyun 			if (vmbus_prep_negotiate_resp(icmsghdrp,
310*4882a593Smuzhiyun 				 recv_buffer, fw_versions, FW_VER_COUNT,
311*4882a593Smuzhiyun 				 vss_versions, VSS_VER_COUNT,
312*4882a593Smuzhiyun 				 NULL, &vss_srv_version)) {
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 				pr_info("VSS IC version %d.%d\n",
315*4882a593Smuzhiyun 					vss_srv_version >> 16,
316*4882a593Smuzhiyun 					vss_srv_version & 0xFFFF);
317*4882a593Smuzhiyun 			}
318*4882a593Smuzhiyun 		} else {
319*4882a593Smuzhiyun 			vss_msg = (struct hv_vss_msg *)&recv_buffer[
320*4882a593Smuzhiyun 				sizeof(struct vmbuspipe_hdr) +
321*4882a593Smuzhiyun 				sizeof(struct icmsg_hdr)];
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun 			/*
324*4882a593Smuzhiyun 			 * Stash away this global state for completing the
325*4882a593Smuzhiyun 			 * transaction; note transactions are serialized.
326*4882a593Smuzhiyun 			 */
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 			vss_transaction.recv_len = recvlen;
329*4882a593Smuzhiyun 			vss_transaction.recv_req_id = requestid;
330*4882a593Smuzhiyun 			vss_transaction.msg = (struct hv_vss_msg *)vss_msg;
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 			schedule_work(&vss_handle_request_work);
333*4882a593Smuzhiyun 			return;
334*4882a593Smuzhiyun 		}
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 		icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION
337*4882a593Smuzhiyun 			| ICMSGHDRFLAG_RESPONSE;
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 		vmbus_sendpacket(channel, recv_buffer,
340*4882a593Smuzhiyun 				       recvlen, requestid,
341*4882a593Smuzhiyun 				       VM_PKT_DATA_INBAND, 0);
342*4882a593Smuzhiyun 	}
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun 
/*
 * Transport reset callback (the daemon closed the device). If the timeout
 * work was still pending, an operation is in flight: fail it back to the
 * host. Then require a fresh daemon registration before serving requests.
 */
static void vss_on_reset(void)
{
	if (cancel_delayed_work_sync(&vss_timeout_work))
		vss_respond_to_host(HV_E_FAIL);
	vss_transaction.state = HVUTIL_DEVICE_INIT;
}
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun int
hv_vss_init(struct hv_util_service * srv)354*4882a593Smuzhiyun hv_vss_init(struct hv_util_service *srv)
355*4882a593Smuzhiyun {
356*4882a593Smuzhiyun 	if (vmbus_proto_version < VERSION_WIN8_1) {
357*4882a593Smuzhiyun 		pr_warn("Integration service 'Backup (volume snapshot)'"
358*4882a593Smuzhiyun 			" not supported on this host version.\n");
359*4882a593Smuzhiyun 		return -ENOTSUPP;
360*4882a593Smuzhiyun 	}
361*4882a593Smuzhiyun 	recv_buffer = srv->recv_buffer;
362*4882a593Smuzhiyun 	vss_transaction.recv_channel = srv->channel;
363*4882a593Smuzhiyun 
364*4882a593Smuzhiyun 	/*
365*4882a593Smuzhiyun 	 * When this driver loads, the user level daemon that
366*4882a593Smuzhiyun 	 * processes the host requests may not yet be running.
367*4882a593Smuzhiyun 	 * Defer processing channel callbacks until the daemon
368*4882a593Smuzhiyun 	 * has registered.
369*4882a593Smuzhiyun 	 */
370*4882a593Smuzhiyun 	vss_transaction.state = HVUTIL_DEVICE_INIT;
371*4882a593Smuzhiyun 
372*4882a593Smuzhiyun 	hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
373*4882a593Smuzhiyun 				    vss_on_msg, vss_on_reset);
374*4882a593Smuzhiyun 	if (!hvt) {
375*4882a593Smuzhiyun 		pr_warn("VSS: Failed to initialize transport\n");
376*4882a593Smuzhiyun 		return -EFAULT;
377*4882a593Smuzhiyun 	}
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 	return 0;
380*4882a593Smuzhiyun }
381*4882a593Smuzhiyun 
/*
 * Cancel the reply-timeout and request-handling work items, waiting for
 * any in-progress execution to finish before returning.
 */
static void hv_vss_cancel_work(void)
{
	cancel_delayed_work_sync(&vss_timeout_work);
	cancel_work_sync(&vss_handle_request_work);
}
387*4882a593Smuzhiyun 
/*
 * Prepare for hibernation/suspend: quiesce channel processing, flush any
 * pending work, fake a THAW to the daemon (see comment below), and force
 * the state machine back to HVUTIL_READY.
 *
 * Returns 0 on success, -ENOMEM if the THAW message cannot be allocated.
 */
int hv_vss_pre_suspend(void)
{
	struct vmbus_channel *channel = vss_transaction.recv_channel;
	struct hv_vss_msg *vss_msg;

	/*
	 * Fake a THAW message for the user space daemon in case the daemon
	 * has frozen the file systems. It doesn't matter if there is already
	 * a message pending to be delivered to the user space since we force
	 * vss_transaction.state to be HVUTIL_READY, so the user space daemon's
	 * write() will fail with EINVAL (see vss_on_msg()), and the daemon
	 * will reset the device by closing and re-opening it.
	 */
	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
	if (!vss_msg)
		return -ENOMEM;

	/* Stop the channel callback tasklet before touching shared state. */
	tasklet_disable(&channel->callback_event);

	vss_msg->vss_hdr.operation = VSS_OP_THAW;

	/* Cancel any possible pending work. */
	hv_vss_cancel_work();

	/* We don't care about the return value. */
	hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);

	kfree(vss_msg);

	vss_transaction.state = HVUTIL_READY;

	/* tasklet_enable() will be called in hv_vss_pre_resume(). */
	return 0;
}
422*4882a593Smuzhiyun 
/*
 * Undo hv_vss_pre_suspend(): re-enable the channel callback tasklet that
 * was disabled before suspend. Always returns 0.
 */
int hv_vss_pre_resume(void)
{
	struct vmbus_channel *channel = vss_transaction.recv_channel;

	tasklet_enable(&channel->callback_event);

	return 0;
}
431*4882a593Smuzhiyun 
/*
 * Tear down the VSS service: mark the device dying (so callbacks bail
 * out), flush pending work, and destroy the userspace transport.
 */
void hv_vss_deinit(void)
{
	vss_transaction.state = HVUTIL_DEVICE_DYING;

	hv_vss_cancel_work();

	hvutil_transport_destroy(hvt);
}
440