// SPDX-License-Identifier: GPL-2.0-only
/*
 * An implementation of file copy service.
 *
 * Copyright (C) 2014, Microsoft, Inc.
 *
 * Author : K. Y. Srinivasan <ksrinivasan@novell.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/nls.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
#include <linux/sched.h>
#include <asm/hyperv-tlfs.h>

#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"

#define WIN8_SRV_MAJOR		1
#define WIN8_SRV_MINOR		1
#define WIN8_SRV_VERSION	(WIN8_SRV_MAJOR << 16 | WIN8_SRV_MINOR)
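/*
 * Version numbers are exchanged as (major << 16) | minor, so
 * WIN8_SRV_VERSION above encodes fcopy protocol 1.1.
 */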

#define FCOPY_VER_COUNT 1
static const int fcopy_versions[] = {
	WIN8_SRV_VERSION
};

#define FW_VER_COUNT 1
static const int fw_versions[] = {
	UTIL_FW_VERSION
};

/*
 * Global state maintained for the transaction that is being processed.
 * For a class of integration services, including the "file copy service",
 * the specified protocol is a "request/response" protocol, which means that
 * there can only be a single outstanding transaction from the host at any
 * given point in time. We use this to simplify memory management in this
 * driver - we cache and process only one message at a time.
 *
 * While the request/response protocol is guaranteed by the host, we further
 * ensure this by serializing packet processing in this driver - we do not
 * read additional packets from the VMBus until the current packet is fully
 * handled.
 */

static struct {
	int state;   /* hvutil_device_state */
	int recv_len; /* number of bytes received. */
	struct hv_fcopy_hdr  *fcopy_msg; /* current message */
	struct vmbus_channel *recv_channel; /* chn we got the request */
	u64 recv_req_id; /* request ID. */
} fcopy_transaction;

static void fcopy_respond_to_host(int error);
static void fcopy_send_data(struct work_struct *dummy);
static void fcopy_timeout_func(struct work_struct *dummy);
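/*
 * fcopy_send_work hands the cached host message to the daemon outside of the
 * channel callback; fcopy_timeout_work is armed for HV_UTIL_TIMEOUT seconds
 * each time such a message is queued, so the transaction is failed if the
 * daemon never replies.
 */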
static DECLARE_DELAYED_WORK(fcopy_timeout_work, fcopy_timeout_func);
static DECLARE_WORK(fcopy_send_work, fcopy_send_data);
static const char fcopy_devname[] = "vmbus/hv_fcopy";
static u8 *recv_buffer;
static struct hvutil_transport *hvt;
/*
 * This state maintains the version number registered by the daemon.
 */
static int dm_reg_value;

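/*
 * Invoked via hv_poll_channel() once the current transaction is complete;
 * rescheduling the channel tasklet lets hv_fcopy_onchannelcallback() pick up
 * any host packet that is already waiting in the ring buffer.
 */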
static void fcopy_poll_wrapper(void *channel)
{
	/* Transaction is finished, reset the state here to avoid races. */
	fcopy_transaction.state = HVUTIL_READY;
	tasklet_schedule(&((struct vmbus_channel *)channel)->callback_event);
}

static void fcopy_timeout_func(struct work_struct *dummy)
{
	/*
	 * If the timer fires, the user-mode component has not responded;
	 * process the pending transaction.
	 */
	fcopy_respond_to_host(HV_E_FAIL);
	hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
}

static void fcopy_register_done(void)
{
	pr_debug("FCP: userspace daemon registered\n");
	hv_poll_channel(fcopy_transaction.recv_channel, fcopy_poll_wrapper);
}

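/*
 * The first write() from the daemon carries the protocol version it speaks:
 * a FCOPY_VERSION_0 daemon expects no reply, while a FCOPY_VERSION_1 daemon
 * expects us to answer with FCOPY_CURRENT_VERSION (fcopy_register_done()
 * then re-enables channel processing).
 */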
static int fcopy_handle_handshake(u32 version)
{
	u32 our_ver = FCOPY_CURRENT_VERSION;

	switch (version) {
	case FCOPY_VERSION_0:
		/* Daemon doesn't expect us to reply */
		dm_reg_value = version;
		break;
	case FCOPY_VERSION_1:
		/* Daemon expects us to reply with our own version */
		if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
					  fcopy_register_done))
			return -EFAULT;
		dm_reg_value = version;
		break;
	default:
		/*
		 * For now we will fail the registration.
		 * If and when we have multiple versions to
		 * deal with, we will be backward compatible.
		 * We will add this code when needed.
		 */
		return -EINVAL;
	}
	pr_debug("FCP: userspace daemon ver. %d connected\n", version);
	return 0;
}

static void fcopy_send_data(struct work_struct *dummy)
{
	struct hv_start_fcopy *smsg_out = NULL;
	int operation = fcopy_transaction.fcopy_msg->operation;
	struct hv_start_fcopy *smsg_in;
	void *out_src;
	int rc, out_len;

	/*
	 * The strings sent from the host are encoded in utf16; convert
	 * them to utf8 strings. The host assures us that the utf16 strings
	 * will not exceed the max lengths specified. We will, however,
	 * reserve room for the string terminating character - in the
	 * utf16s_to_utf8s() calls we limit the size of the buffer where the
	 * converted string is placed to W_MAX_PATH - 1 to guarantee
	 * that the strings can be properly terminated!
	 */

	switch (operation) {
	case START_FILE_COPY:
		out_len = sizeof(struct hv_start_fcopy);
		smsg_out = kzalloc(sizeof(*smsg_out), GFP_KERNEL);
		if (!smsg_out)
			return;

		smsg_out->hdr.operation = operation;
		smsg_in = (struct hv_start_fcopy *)fcopy_transaction.fcopy_msg;

		utf16s_to_utf8s((wchar_t *)smsg_in->file_name, W_MAX_PATH,
				UTF16_LITTLE_ENDIAN,
				(__u8 *)&smsg_out->file_name, W_MAX_PATH - 1);

		utf16s_to_utf8s((wchar_t *)smsg_in->path_name, W_MAX_PATH,
				UTF16_LITTLE_ENDIAN,
				(__u8 *)&smsg_out->path_name, W_MAX_PATH - 1);

		smsg_out->copy_flags = smsg_in->copy_flags;
		smsg_out->file_size = smsg_in->file_size;
		out_src = smsg_out;
		break;

	case WRITE_TO_FILE:
		out_src = fcopy_transaction.fcopy_msg;
		out_len = sizeof(struct hv_do_fcopy);
		break;
	default:
		out_src = fcopy_transaction.fcopy_msg;
		out_len = fcopy_transaction.recv_len;
		break;
	}

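	/*
	 * Hand the (possibly converted) message to the daemon.
	 * HVUTIL_USERSPACE_REQ means we are now waiting on the daemon's
	 * reply; if the transport send fails, fail the transaction back to
	 * the host right away instead of waiting for the timeout to fire.
	 */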
	fcopy_transaction.state = HVUTIL_USERSPACE_REQ;
	rc = hvutil_transport_send(hvt, out_src, out_len, NULL);
	if (rc) {
		pr_debug("FCP: failed to communicate to the daemon: %d\n", rc);
		if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
			fcopy_respond_to_host(HV_E_FAIL);
			fcopy_transaction.state = HVUTIL_READY;
		}
	}
	kfree(smsg_out);
}

/*
 * Send a response back to the host.
 */

static void
fcopy_respond_to_host(int error)
{
	struct icmsg_hdr *icmsghdr;
	u32 buf_len;
	struct vmbus_channel *channel;
	u64 req_id;

	/*
	 * Copy the global state for completing the transaction. Note that
	 * only one transaction can be active at a time. This is guaranteed
	 * by the file copy protocol implemented by the host. Furthermore,
	 * the "transaction active" state we maintain ensures that there can
	 * only be one active transaction at a time.
	 */

	buf_len = fcopy_transaction.recv_len;
	channel = fcopy_transaction.recv_channel;
	req_id = fcopy_transaction.recv_req_id;

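	/*
	 * The reply reuses the buffer the request arrived in: only the
	 * status and the transaction/response flags in the icmsg header are
	 * updated before the packet is sent back with the original
	 * request id.
	 */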
	icmsghdr = (struct icmsg_hdr *)
			&recv_buffer[sizeof(struct vmbuspipe_hdr)];

	if (channel->onchannel_callback == NULL)
		/*
		 * We have raced with util driver being unloaded;
		 * silently return.
		 */
		return;

	icmsghdr->status = error;
	icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
	vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
			 VM_PKT_DATA_INBAND, 0);
}

void hv_fcopy_onchannelcallback(void *context)
{
	struct vmbus_channel *channel = context;
	u32 recvlen;
	u64 requestid;
	struct hv_fcopy_hdr *fcopy_msg;
	struct icmsg_hdr *icmsghdr;
	int fcopy_srv_version;

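	/*
	 * A transaction is still in flight (state > HVUTIL_READY): leave the
	 * packet in the ring buffer; fcopy_poll_wrapper() will re-schedule
	 * this callback once the daemon has replied or the timeout has fired.
	 */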
	if (fcopy_transaction.state > HVUTIL_READY)
		return;

	vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen,
			 &requestid);
	if (recvlen <= 0)
		return;

	icmsghdr = (struct icmsg_hdr *)&recv_buffer[
			sizeof(struct vmbuspipe_hdr)];
	if (icmsghdr->icmsgtype == ICMSGTYPE_NEGOTIATE) {
		if (vmbus_prep_negotiate_resp(icmsghdr, recv_buffer,
				fw_versions, FW_VER_COUNT,
				fcopy_versions, FCOPY_VER_COUNT,
				NULL, &fcopy_srv_version)) {

			pr_info("FCopy IC version %d.%d\n",
				fcopy_srv_version >> 16,
				fcopy_srv_version & 0xFFFF);
		}
	} else {
		fcopy_msg = (struct hv_fcopy_hdr *)&recv_buffer[
				sizeof(struct vmbuspipe_hdr) +
				sizeof(struct icmsg_hdr)];

		/*
		 * Stash away this global state for completing the
		 * transaction; note transactions are serialized.
		 */

		fcopy_transaction.recv_len = recvlen;
		fcopy_transaction.recv_req_id = requestid;
		fcopy_transaction.fcopy_msg = fcopy_msg;

		if (fcopy_transaction.state < HVUTIL_READY) {
			/* Userspace is not registered yet */
			fcopy_respond_to_host(HV_E_FAIL);
			return;
		}
		fcopy_transaction.state = HVUTIL_HOSTMSG_RECEIVED;

		/*
		 * Send the information to the user-level daemon.
		 */
		schedule_work(&fcopy_send_work);
		schedule_delayed_work(&fcopy_timeout_work,
				      HV_UTIL_TIMEOUT * HZ);
		return;
	}
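	/*
	 * Only the negotiate path reaches this point; the data-message path
	 * returns early above and is acknowledged later via
	 * fcopy_respond_to_host().
	 */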
	icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
	vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
			 VM_PKT_DATA_INBAND, 0);
}

/* Callback when data is received from userspace */
static int fcopy_on_msg(void *msg, int len)
{
	int *val = (int *)msg;

	if (len != sizeof(int))
		return -EINVAL;

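	/*
	 * The daemon always writes a single int: the protocol version during
	 * the initial handshake, and the status of the completed operation
	 * afterwards.
	 */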
	if (fcopy_transaction.state == HVUTIL_DEVICE_INIT)
		return fcopy_handle_handshake(*val);

	if (fcopy_transaction.state != HVUTIL_USERSPACE_REQ)
		return -EINVAL;

	/*
	 * Complete the transaction by forwarding the result
	 * to the host. But first, cancel the timeout.
	 */
	if (cancel_delayed_work_sync(&fcopy_timeout_work)) {
		fcopy_transaction.state = HVUTIL_USERSPACE_RECV;
		fcopy_respond_to_host(*val);
		hv_poll_channel(fcopy_transaction.recv_channel,
				fcopy_poll_wrapper);
	}

	return 0;
}

static void fcopy_on_reset(void)
{
	/*
	 * The daemon has exited; reset the state.
	 */
	fcopy_transaction.state = HVUTIL_DEVICE_INIT;

	if (cancel_delayed_work_sync(&fcopy_timeout_work))
		fcopy_respond_to_host(HV_E_FAIL);
}

int hv_fcopy_init(struct hv_util_service *srv)
{
	recv_buffer = srv->recv_buffer;
	fcopy_transaction.recv_channel = srv->channel;

	/*
	 * When this driver loads, the user level daemon that
	 * processes the host requests may not yet be running.
	 * Defer processing channel callbacks until the daemon
	 * has registered.
	 */
	fcopy_transaction.state = HVUTIL_DEVICE_INIT;

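	/*
	 * Register the hvutil transport the daemon uses to talk to us
	 * (exposed as the /dev/vmbus/hv_fcopy char device); the zero
	 * arguments mean no netlink connector id is registered for fcopy.
	 */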
	hvt = hvutil_transport_init(fcopy_devname, 0, 0,
				    fcopy_on_msg, fcopy_on_reset);
	if (!hvt)
		return -EFAULT;

	return 0;
}

static void hv_fcopy_cancel_work(void)
{
	cancel_delayed_work_sync(&fcopy_timeout_work);
	cancel_work_sync(&fcopy_send_work);
}

int hv_fcopy_pre_suspend(void)
{
	struct vmbus_channel *channel = fcopy_transaction.recv_channel;
	struct hv_fcopy_hdr *fcopy_msg;

	/*
	 * Fake a CANCEL_FCOPY message for the user space daemon in case the
	 * daemon is in the middle of copying some file. It doesn't matter if
	 * there is already a message pending to be delivered to the user
	 * space since we force fcopy_transaction.state to be HVUTIL_READY, so
	 * the user space daemon's write() will fail with EINVAL (see
	 * fcopy_on_msg()), and the daemon will reset the device by closing
	 * and re-opening it.
	 */
	fcopy_msg = kzalloc(sizeof(*fcopy_msg), GFP_KERNEL);
	if (!fcopy_msg)
		return -ENOMEM;

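	/*
	 * Block the channel callback so it cannot race with the state reset
	 * below.
	 */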
	tasklet_disable(&channel->callback_event);

	fcopy_msg->operation = CANCEL_FCOPY;

	hv_fcopy_cancel_work();

	/* We don't care about the return value. */
	hvutil_transport_send(hvt, fcopy_msg, sizeof(*fcopy_msg), NULL);

	kfree(fcopy_msg);

	fcopy_transaction.state = HVUTIL_READY;

	/* tasklet_enable() will be called in hv_fcopy_pre_resume(). */
	return 0;
}

int hv_fcopy_pre_resume(void)
{
	struct vmbus_channel *channel = fcopy_transaction.recv_channel;

	tasklet_enable(&channel->callback_event);

	return 0;
}

void hv_fcopy_deinit(void)
{
	fcopy_transaction.state = HVUTIL_DEVICE_DYING;

	hv_fcopy_cancel_work();

	hvutil_transport_destroy(hvt);
}