// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/hyperv.h>
#include <asm/mshyperv.h>

#include "hyperv_vmbus.h"

static void init_vp_index(struct vmbus_channel *channel);

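/*
 * Note: the entries below are kept in the same order as the hv_dev_type
 * enumeration, since hv_get_dev_type() below uses the enum value as an
 * index into this array.
 */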
const struct vmbus_device vmbus_devs[] = {
	/* IDE */
	{ .dev_type = HV_IDE,
	  HV_IDE_GUID,
	  .perf_device = true,
	},

	/* SCSI */
	{ .dev_type = HV_SCSI,
	  HV_SCSI_GUID,
	  .perf_device = true,
	},

	/* Fibre Channel */
	{ .dev_type = HV_FC,
	  HV_SYNTHFC_GUID,
	  .perf_device = true,
	},

	/* Synthetic NIC */
	{ .dev_type = HV_NIC,
	  HV_NIC_GUID,
	  .perf_device = true,
	},

	/* Network Direct */
	{ .dev_type = HV_ND,
	  HV_ND_GUID,
	  .perf_device = true,
	},

	/* PCIE */
	{ .dev_type = HV_PCIE,
	  HV_PCIE_GUID,
	  .perf_device = false,
	},

	/* Synthetic Frame Buffer */
	{ .dev_type = HV_FB,
	  HV_SYNTHVID_GUID,
	  .perf_device = false,
	},

	/* Synthetic Keyboard */
	{ .dev_type = HV_KBD,
	  HV_KBD_GUID,
	  .perf_device = false,
	},

	/* Synthetic MOUSE */
	{ .dev_type = HV_MOUSE,
	  HV_MOUSE_GUID,
	  .perf_device = false,
	},

	/* KVP */
	{ .dev_type = HV_KVP,
	  HV_KVP_GUID,
	  .perf_device = false,
	},

	/* Time Synch */
	{ .dev_type = HV_TS,
	  HV_TS_GUID,
	  .perf_device = false,
	},

	/* Heartbeat */
	{ .dev_type = HV_HB,
	  HV_HEART_BEAT_GUID,
	  .perf_device = false,
	},

	/* Shutdown */
	{ .dev_type = HV_SHUTDOWN,
	  HV_SHUTDOWN_GUID,
	  .perf_device = false,
	},

	/* File copy */
	{ .dev_type = HV_FCOPY,
	  HV_FCOPY_GUID,
	  .perf_device = false,
	},

	/* Backup */
	{ .dev_type = HV_BACKUP,
	  HV_VSS_GUID,
	  .perf_device = false,
	},

	/* Dynamic Memory */
	{ .dev_type = HV_DM,
	  HV_DM_GUID,
	  .perf_device = false,
	},

	/* Unknown GUID */
	{ .dev_type = HV_UNKNOWN,
	  .perf_device = false,
	},
};

static const struct {
	guid_t guid;
} vmbus_unsupported_devs[] = {
	{ HV_AVMA1_GUID },
	{ HV_AVMA2_GUID },
	{ HV_RDV_GUID },
};

/*
 * The rescinded channel may be blocked waiting for a response from the host;
 * take care of that.
 */
static void vmbus_rescind_cleanup(struct vmbus_channel *channel)
{
	struct vmbus_channel_msginfo *msginfo;
	unsigned long flags;


	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	channel->rescind = true;
	list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
			    msglistentry) {

		if (msginfo->waiting_channel == channel) {
			complete(&msginfo->waitevent);
			break;
		}
	}
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
}

static bool is_unsupported_vmbus_devs(const guid_t *guid)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++)
		if (guid_equal(guid, &vmbus_unsupported_devs[i].guid))
			return true;
	return false;
}

static u16 hv_get_dev_type(const struct vmbus_channel *channel)
{
	const guid_t *guid = &channel->offermsg.offer.if_type;
	u16 i;

	if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid))
		return HV_UNKNOWN;

	for (i = HV_IDE; i < HV_UNKNOWN; i++) {
		if (guid_equal(guid, &vmbus_devs[i].guid))
			return i;
	}
	pr_info("Unknown GUID: %pUl\n", guid);
	return i;
}

/**
 * vmbus_prep_negotiate_resp() - Create default response for Negotiate message
 * @icmsghdrp: Pointer to msg header structure
 * @buf: Raw buffer channel data
 * @fw_version: The framework versions we can support.
 * @fw_vercnt: The size of @fw_version.
 * @srv_version: The service versions we can support.
 * @srv_vercnt: The size of @srv_version.
 * @nego_fw_version: The selected framework version.
 * @nego_srv_version: The selected service version.
 *
 * Note: Versions are given in decreasing order.
 *
 * Set up and fill in default negotiate response message.
 * Mainly used by Hyper-V drivers.
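 *
 * Illustrative call sequence (the version arrays here are hypothetical;
 * real utility drivers pass their own framework/service version tables):
 *
 *	static const int fw_versions[] = { UTIL_FW_VERSION };
 *	static const int srv_versions[] = { SRV_VERSION_NEW, SRV_VERSION_OLD };
 *	int fw_version, srv_version;
 *	bool ok;
 *
 *	ok = vmbus_prep_negotiate_resp(icmsghdrp, buf,
 *				       fw_versions, ARRAY_SIZE(fw_versions),
 *				       srv_versions, ARRAY_SIZE(srv_versions),
 *				       &fw_version, &srv_version);
 *
 * When no common version is found, the response is still prepared (with
 * both version counts set to 0) and false is returned.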
 */
bool vmbus_prep_negotiate_resp(struct icmsg_hdr *icmsghdrp,
				u8 *buf, const int *fw_version, int fw_vercnt,
				const int *srv_version, int srv_vercnt,
				int *nego_fw_version, int *nego_srv_version)
{
	int icframe_major, icframe_minor;
	int icmsg_major, icmsg_minor;
	int fw_major, fw_minor;
	int srv_major, srv_minor;
	int i, j;
	bool found_match = false;
	struct icmsg_negotiate *negop;

	icmsghdrp->icmsgsize = 0x10;
	negop = (struct icmsg_negotiate *)&buf[
		sizeof(struct vmbuspipe_hdr) +
		sizeof(struct icmsg_hdr)];

	icframe_major = negop->icframe_vercnt;
	icframe_minor = 0;

	icmsg_major = negop->icmsg_vercnt;
	icmsg_minor = 0;

	/*
	 * Select the framework version number we will
	 * support.
	 */

	for (i = 0; i < fw_vercnt; i++) {
		fw_major = (fw_version[i] >> 16);
		fw_minor = (fw_version[i] & 0xFFFF);

		for (j = 0; j < negop->icframe_vercnt; j++) {
			if ((negop->icversion_data[j].major == fw_major) &&
			    (negop->icversion_data[j].minor == fw_minor)) {
				icframe_major = negop->icversion_data[j].major;
				icframe_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	if (!found_match)
		goto fw_error;

	found_match = false;

	for (i = 0; i < srv_vercnt; i++) {
		srv_major = (srv_version[i] >> 16);
		srv_minor = (srv_version[i] & 0xFFFF);

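		/*
		 * Service versions follow the framework versions in
		 * negop->icversion_data[], hence the indexing starts at
		 * icframe_vercnt.
		 */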
		for (j = negop->icframe_vercnt;
		     (j < negop->icframe_vercnt + negop->icmsg_vercnt);
		     j++) {

			if ((negop->icversion_data[j].major == srv_major) &&
			    (negop->icversion_data[j].minor == srv_minor)) {

				icmsg_major = negop->icversion_data[j].major;
				icmsg_minor = negop->icversion_data[j].minor;
				found_match = true;
				break;
			}
		}

		if (found_match)
			break;
	}

	/*
	 * Respond with the framework and service
	 * version numbers we can support.
	 */

fw_error:
	if (!found_match) {
		negop->icframe_vercnt = 0;
		negop->icmsg_vercnt = 0;
	} else {
		negop->icframe_vercnt = 1;
		negop->icmsg_vercnt = 1;
	}

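	/*
	 * The negotiated versions are returned in the same
	 * (major << 16) | minor encoding used by the fw_version and
	 * srv_version input arrays.
	 */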
	if (nego_fw_version)
		*nego_fw_version = (icframe_major << 16) | icframe_minor;

	if (nego_srv_version)
		*nego_srv_version = (icmsg_major << 16) | icmsg_minor;

	negop->icversion_data[0].major = icframe_major;
	negop->icversion_data[0].minor = icframe_minor;
	negop->icversion_data[1].major = icmsg_major;
	negop->icversion_data[1].minor = icmsg_minor;
	return found_match;
}
EXPORT_SYMBOL_GPL(vmbus_prep_negotiate_resp);

/*
 * alloc_channel - Allocate and initialize a vmbus channel object
 */
static struct vmbus_channel *alloc_channel(void)
{
	struct vmbus_channel *channel;

	channel = kzalloc(sizeof(*channel), GFP_ATOMIC);
	if (!channel)
		return NULL;

	spin_lock_init(&channel->sched_lock);
	init_completion(&channel->rescind_event);

	INIT_LIST_HEAD(&channel->sc_list);

	tasklet_init(&channel->callback_event,
		     vmbus_on_event, (unsigned long)channel);

	hv_ringbuffer_pre_init(channel);

	return channel;
}

/*
 * free_channel - Release the resources used by the vmbus channel object
 */
static void free_channel(struct vmbus_channel *channel)
{
	tasklet_kill(&channel->callback_event);
	vmbus_remove_channel_attr_group(channel);

	kobject_put(&channel->kobj);
}

void vmbus_channel_map_relid(struct vmbus_channel *channel)
{
	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
		return;
	/*
	 * The mapping of the channel's relid is visible from the CPUs that
	 * execute vmbus_chan_sched() by the time that vmbus_chan_sched() will
	 * execute:
	 *
	 * (a) In the "normal (i.e., not resuming from hibernation)" path,
	 *     the full barrier in virt_store_mb() guarantees that the store
	 *     is propagated to all CPUs before the add_channel_work work
	 *     is queued.  In turn, add_channel_work is queued before the
	 *     channel's ring buffer is allocated/initialized and the
	 *     OPENCHANNEL message for the channel is sent in vmbus_open().
	 *     Hyper-V won't start sending the interrupts for the channel
	 *     before the OPENCHANNEL message is acked.  The memory barrier
	 *     in vmbus_chan_sched() -> sync_test_and_clear_bit() ensures
	 *     that vmbus_chan_sched() must find the channel's relid in
	 *     recv_int_page before retrieving the channel pointer from the
	 *     array of channels.
	 *
	 * (b) In the "resuming from hibernation" path, the virt_store_mb()
	 *     guarantees that the store is propagated to all CPUs before
	 *     the VMBus connection is marked as ready for the resume event
	 *     (cf. check_ready_for_resume_event()).  The interrupt handler
	 *     of the VMBus driver and vmbus_chan_sched() can not run before
	 *     vmbus_bus_resume() has completed execution (cf. resume_noirq).
	 */
	virt_store_mb(
		vmbus_connection.channels[channel->offermsg.child_relid],
		channel);
}

void vmbus_channel_unmap_relid(struct vmbus_channel *channel)
{
	if (WARN_ON(channel->offermsg.child_relid >= MAX_CHANNEL_RELIDS))
		return;
	WRITE_ONCE(
		vmbus_connection.channels[channel->offermsg.child_relid],
		NULL);
}

static void vmbus_release_relid(u32 relid)
{
	struct vmbus_channel_relid_released msg;
	int ret;

	memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
	msg.child_relid = relid;
	msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
	ret = vmbus_post_msg(&msg, sizeof(struct vmbus_channel_relid_released),
			     true);

	trace_vmbus_release_relid(&msg, ret);
}

void hv_process_channel_removal(struct vmbus_channel *channel)
{
	lockdep_assert_held(&vmbus_connection.channel_mutex);
	BUG_ON(!channel->rescind);

	/*
	 * hv_process_channel_removal() could find INVALID_RELID only for
	 * hv_sock channels.  See the inline comments in vmbus_onoffer().
	 */
	WARN_ON(channel->offermsg.child_relid == INVALID_RELID &&
		!is_hvsock_channel(channel));

	/*
	 * Upon suspend, an in-use hv_sock channel is removed from the array of
	 * channels and the relid is invalidated.  After hibernation, when the
	 * user-space application destroys the channel, it's unnecessary and
	 * unsafe to remove the channel from the array of channels.  See also
	 * the inline comments before the call of vmbus_release_relid() below.
	 */
	if (channel->offermsg.child_relid != INVALID_RELID)
		vmbus_channel_unmap_relid(channel);

	if (channel->primary_channel == NULL)
		list_del(&channel->listentry);
	else
		list_del(&channel->sc_list);

	/*
	 * If this is a "perf" channel, updates the hv_numa_map[] masks so that
	 * init_vp_index() can (re-)use the CPU.
	 */
	if (hv_is_perf_channel(channel))
		hv_clear_alloced_cpu(channel->target_cpu);

	/*
	 * Upon suspend, an in-use hv_sock channel is marked as "rescinded" and
	 * the relid is invalidated; after hibernation, when the user-space app
	 * destroys the channel, the relid is INVALID_RELID, and in this case
	 * it's unnecessary and unsafe to release the old relid, since the same
	 * relid can refer to a completely different channel now.
	 */
	if (channel->offermsg.child_relid != INVALID_RELID)
		vmbus_release_relid(channel->offermsg.child_relid);

	free_channel(channel);
}

void vmbus_free_channels(void)
{
	struct vmbus_channel *channel, *tmp;

	list_for_each_entry_safe(channel, tmp, &vmbus_connection.chn_list,
				 listentry) {
		/* hv_process_channel_removal() needs this */
		channel->rescind = true;

		vmbus_device_unregister(channel->device_obj);
	}
}

/* Note: the function can run concurrently for primary/sub channels. */
static void vmbus_add_channel_work(struct work_struct *work)
{
	struct vmbus_channel *newchannel =
		container_of(work, struct vmbus_channel, add_channel_work);
	struct vmbus_channel *primary_channel = newchannel->primary_channel;
	int ret;

	/*
	 * This state is used to indicate a successful open
	 * so that when we do close the channel normally, we
	 * can cleanup properly.
	 */
	newchannel->state = CHANNEL_OPEN_STATE;

	if (primary_channel != NULL) {
		/* newchannel is a sub-channel. */
		struct hv_device *dev = primary_channel->device_obj;

		if (vmbus_add_channel_kobj(dev, newchannel))
			goto err_deq_chan;

		if (primary_channel->sc_creation_callback != NULL)
			primary_channel->sc_creation_callback(newchannel);

		newchannel->probe_done = true;
		return;
	}

	/*
	 * Start the process of binding the primary channel to the driver
	 */
	newchannel->device_obj = vmbus_device_create(
		&newchannel->offermsg.offer.if_type,
		&newchannel->offermsg.offer.if_instance,
		newchannel);
	if (!newchannel->device_obj)
		goto err_deq_chan;

	newchannel->device_obj->device_id = newchannel->device_id;
	/*
	 * Add the new device to the bus. This will kick off device-driver
	 * binding which eventually invokes the device driver's AddDevice()
	 * method.
	 *
	 * If vmbus_device_register() fails, the 'device_obj' is freed in
	 * vmbus_device_release() as called by device_unregister() in the
	 * error path of vmbus_device_register(). In the outside error
	 * path, there's no need to free it.
	 */
	ret = vmbus_device_register(newchannel->device_obj);

	if (ret != 0) {
		pr_err("unable to add child device object (relid %d)\n",
		       newchannel->offermsg.child_relid);
		goto err_deq_chan;
	}

	newchannel->probe_done = true;
	return;

err_deq_chan:
	mutex_lock(&vmbus_connection.channel_mutex);

	/*
	 * We need to set the flag, otherwise
	 * vmbus_onoffer_rescind() can be blocked.
	 */
	newchannel->probe_done = true;

	if (primary_channel == NULL)
		list_del(&newchannel->listentry);
	else
		list_del(&newchannel->sc_list);

	/* vmbus_process_offer() has mapped the channel. */
	vmbus_channel_unmap_relid(newchannel);

	mutex_unlock(&vmbus_connection.channel_mutex);

	vmbus_release_relid(newchannel->offermsg.child_relid);

	free_channel(newchannel);
}

/*
 * vmbus_process_offer - Process the offer by creating a channel/device
 * associated with this offer
 */
static void vmbus_process_offer(struct vmbus_channel *newchannel)
{
	struct vmbus_channel *channel;
	struct workqueue_struct *wq;
	bool fnew = true;

	/*
	 * Synchronize vmbus_process_offer() and CPU hotplugging:
	 *
	 *	CPU1				CPU2
	 *
	 * [vmbus_process_offer()]	[Hot removal of the CPU]
	 *
	 * CPU_READ_LOCK		CPUS_WRITE_LOCK
	 * LOAD cpu_online_mask		SEARCH chn_list
	 * STORE target_cpu		LOAD target_cpu
	 * INSERT chn_list		STORE cpu_online_mask
	 * CPUS_READ_UNLOCK		CPUS_WRITE_UNLOCK
	 *
	 * Forbids: CPU1's LOAD from *not* seeing CPU2's STORE &&
	 *	CPU2's SEARCH from *not* seeing CPU1's INSERT
	 *
	 * Forbids: CPU2's SEARCH from seeing CPU1's INSERT &&
	 *	CPU2's LOAD from *not* seeing CPU1's STORE
	 */
	cpus_read_lock();

	/*
	 * Serializes the modifications of the chn_list list as well as
	 * the accesses to next_numa_node_id in init_vp_index().
	 */
	mutex_lock(&vmbus_connection.channel_mutex);

	init_vp_index(newchannel);

	/* Remember the channels that should be cleaned up upon suspend. */
	if (is_hvsock_channel(newchannel) || is_sub_channel(newchannel))
		atomic_inc(&vmbus_connection.nr_chan_close_on_suspend);

	/*
	 * Now that we have acquired the channel_mutex,
	 * we can release the potentially racing rescind thread.
	 */
	atomic_dec(&vmbus_connection.offer_in_progress);

	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (guid_equal(&channel->offermsg.offer.if_type,
			       &newchannel->offermsg.offer.if_type) &&
		    guid_equal(&channel->offermsg.offer.if_instance,
			       &newchannel->offermsg.offer.if_instance)) {
			fnew = false;
			break;
		}
	}

	if (fnew) {
		list_add_tail(&newchannel->listentry,
			      &vmbus_connection.chn_list);
	} else {
		/*
		 * Check to see if this is a valid sub-channel.
		 */
		if (newchannel->offermsg.offer.sub_channel_index == 0) {
			mutex_unlock(&vmbus_connection.channel_mutex);
			cpus_read_unlock();
			/*
			 * Don't call free_channel(), because newchannel->kobj
			 * is not initialized yet.
			 */
			kfree(newchannel);
			WARN_ON_ONCE(1);
			return;
		}
		/*
		 * Process the sub-channel.
		 */
		newchannel->primary_channel = channel;
		list_add_tail(&newchannel->sc_list, &channel->sc_list);
	}

	vmbus_channel_map_relid(newchannel);

	mutex_unlock(&vmbus_connection.channel_mutex);
	cpus_read_unlock();

	/*
	 * vmbus_process_offer() mustn't call channel->sc_creation_callback()
	 * directly for sub-channels, because sc_creation_callback() ->
	 * vmbus_open() may never get the host's response to the
	 * OPEN_CHANNEL message (the host may rescind a channel at any time,
	 * e.g. in the case of hot removing a NIC), and vmbus_onoffer_rescind()
	 * may not wake up the vmbus_open() as it's blocked due to a non-zero
	 * vmbus_connection.offer_in_progress, and finally we have a deadlock.
	 *
	 * The above is also true for primary channels, if the related device
	 * drivers use sync probing mode by default.
	 *
	 * And, usually the handling of primary channels and sub-channels can
	 * depend on each other, so we should offload them to different
	 * workqueues to avoid possible deadlock, e.g. in sync-probing mode,
	 * NIC1's netvsc_subchan_work() can race with NIC2's netvsc_probe() ->
	 * rtnl_lock(), and causes deadlock: the former gets the rtnl_lock
	 * and waits for all the sub-channels to appear, but the latter
	 * can't get the rtnl_lock and this blocks the handling of
	 * sub-channels.
	 */
	INIT_WORK(&newchannel->add_channel_work, vmbus_add_channel_work);
	wq = fnew ? vmbus_connection.handle_primary_chan_wq :
		    vmbus_connection.handle_sub_chan_wq;
	queue_work(wq, &newchannel->add_channel_work);
}

/*
 * We use this state to statically distribute the channel interrupt load.
 */
static int next_numa_node_id;

/*
 * Starting with Win8, we can statically distribute the incoming
 * channel interrupt load by binding a channel to VCPU.
 *
 * For pre-win8 hosts or non-performance critical channels we assign the
 * VMBUS_CONNECT_CPU.
 *
 * Starting with win8, performance critical channels will be distributed
 * evenly among all the available NUMA nodes.  Once the node is assigned,
 * we will assign the CPU based on a simple round robin scheme.
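 *
 * For example (numbers purely illustrative): on a system with two NUMA
 * nodes, successive perf channels target node 0, node 1, node 0, ... and
 * within each node the first CPU not yet marked in hv_numa_map[] is chosen;
 * once every CPU of a node has been used, that node's mask is reset.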
 */
static void init_vp_index(struct vmbus_channel *channel)
{
	bool perf_chn = hv_is_perf_channel(channel);
	cpumask_var_t available_mask;
	struct cpumask *alloced_mask;
	u32 target_cpu;
	int numa_node;

	if ((vmbus_proto_version == VERSION_WS2008) ||
	    (vmbus_proto_version == VERSION_WIN7) || (!perf_chn) ||
	    !alloc_cpumask_var(&available_mask, GFP_KERNEL)) {
		/*
		 * Prior to win8, all channel interrupts are
		 * delivered on VMBUS_CONNECT_CPU.
		 * Also if the channel is not a performance critical
		 * channel, bind it to VMBUS_CONNECT_CPU.
		 * In case alloc_cpumask_var() fails, bind it to
		 * VMBUS_CONNECT_CPU.
		 */
		channel->target_cpu = VMBUS_CONNECT_CPU;
		if (perf_chn)
			hv_set_alloced_cpu(VMBUS_CONNECT_CPU);
		return;
	}

	while (true) {
		numa_node = next_numa_node_id++;
		if (numa_node == nr_node_ids) {
			next_numa_node_id = 0;
			continue;
		}
		if (cpumask_empty(cpumask_of_node(numa_node)))
			continue;
		break;
	}
	alloced_mask = &hv_context.hv_numa_map[numa_node];

	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(numa_node))) {
		/*
		 * We have cycled through all the CPUs in the node;
		 * reset the alloced map.
		 */
		cpumask_clear(alloced_mask);
	}

	cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node));

	target_cpu = cpumask_first(available_mask);
	cpumask_set_cpu(target_cpu, alloced_mask);

	channel->target_cpu = target_cpu;

	free_cpumask_var(available_mask);
}

#define UNLOAD_DELAY_UNIT_MS	10		/* 10 milliseconds */
#define UNLOAD_WAIT_MS		(100*1000)	/* 100 seconds */
#define UNLOAD_WAIT_LOOPS	(UNLOAD_WAIT_MS/UNLOAD_DELAY_UNIT_MS)
#define UNLOAD_MSG_MS		(5*1000)	/* Every 5 seconds */
#define UNLOAD_MSG_LOOPS	(UNLOAD_MSG_MS/UNLOAD_DELAY_UNIT_MS)
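/*
 * With the values above, vmbus_wait_for_unload() polls at most
 * UNLOAD_WAIT_LOOPS = 10000 times (100 s / 10 ms) and prints a progress
 * notice every UNLOAD_MSG_LOOPS = 500 iterations (i.e. every 5 seconds).
 */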

static void vmbus_wait_for_unload(void)
{
	int cpu;
	void *page_addr;
	struct hv_message *msg;
	struct vmbus_channel_message_header *hdr;
	u32 message_type, i;

	/*
	 * CHANNELMSG_UNLOAD_RESPONSE is always delivered to the CPU which was
	 * used for initial contact or to CPU0 depending on host version. When
	 * we're crashing on a different CPU let's hope that IRQ handler on
	 * the cpu which receives CHANNELMSG_UNLOAD_RESPONSE is still
	 * functional and vmbus_unload_response() will complete
	 * vmbus_connection.unload_event. If not, the last thing we can do is
	 * read message pages for all CPUs directly.
	 *
	 * Wait up to 100 seconds since an Azure host must writeback any dirty
	 * data in its disk cache before the VMbus UNLOAD request will
	 * complete. This flushing has been empirically observed to take up
	 * to 50 seconds in cases with a lot of dirty data, so allow additional
	 * leeway and for inaccuracies in mdelay(). But eventually time out so
	 * that the panic path can't get hung forever in case the response
	 * message isn't seen.
	 */
	for (i = 1; i <= UNLOAD_WAIT_LOOPS; i++) {
		if (completion_done(&vmbus_connection.unload_event))
			goto completed;

		for_each_online_cpu(cpu) {
			struct hv_per_cpu_context *hv_cpu
				= per_cpu_ptr(hv_context.cpu_context, cpu);

			page_addr = hv_cpu->synic_message_page;
			msg = (struct hv_message *)page_addr
				+ VMBUS_MESSAGE_SINT;

			message_type = READ_ONCE(msg->header.message_type);
			if (message_type == HVMSG_NONE)
				continue;

			hdr = (struct vmbus_channel_message_header *)
				msg->u.payload;

			if (hdr->msgtype == CHANNELMSG_UNLOAD_RESPONSE)
				complete(&vmbus_connection.unload_event);

			vmbus_signal_eom(msg, message_type);
		}

		/*
		 * Give a notice periodically so someone watching the
		 * serial output won't think it is completely hung.
		 */
		if (!(i % UNLOAD_MSG_LOOPS))
			pr_notice("Waiting for VMBus UNLOAD to complete\n");

		mdelay(UNLOAD_DELAY_UNIT_MS);
	}
	pr_err("Continuing even though VMBus UNLOAD did not complete\n");

completed:
	/*
	 * We're crashing and already got the UNLOAD_RESPONSE, cleanup all
	 * maybe-pending messages on all CPUs to be able to receive new
	 * messages after we reconnect.
	 */
	for_each_online_cpu(cpu) {
		struct hv_per_cpu_context *hv_cpu
			= per_cpu_ptr(hv_context.cpu_context, cpu);

		page_addr = hv_cpu->synic_message_page;
		msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT;
		msg->header.message_type = HVMSG_NONE;
	}
}

/*
 * vmbus_unload_response - Handler for the unload response.
 */
static void vmbus_unload_response(struct vmbus_channel_message_header *hdr)
{
	/*
	 * This is a global event; just wakeup the waiting thread.
	 * Once we successfully unload, we can cleanup the monitor state.
	 */
	complete(&vmbus_connection.unload_event);
}

void vmbus_initiate_unload(bool crash)
{
	struct vmbus_channel_message_header hdr;

	if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
		return;

	/* Pre-Win2012R2 hosts don't support reconnect */
	if (vmbus_proto_version < VERSION_WIN8_1)
		return;

	init_completion(&vmbus_connection.unload_event);
	memset(&hdr, 0, sizeof(struct vmbus_channel_message_header));
	hdr.msgtype = CHANNELMSG_UNLOAD;
	vmbus_post_msg(&hdr, sizeof(struct vmbus_channel_message_header),
		       !crash);

	/*
	 * vmbus_initiate_unload() is also called on crash and the crash can be
	 * happening in an interrupt context, where scheduling is impossible.
	 */
	if (!crash)
		wait_for_completion(&vmbus_connection.unload_event);
	else
		vmbus_wait_for_unload();
}

static void check_ready_for_resume_event(void)
{
	/*
	 * If all the old primary channels have been fixed up, then it's safe
	 * to resume.
	 */
	if (atomic_dec_and_test(&vmbus_connection.nr_chan_fixup_on_resume))
		complete(&vmbus_connection.ready_for_resume_event);
}

static void vmbus_setup_channel_state(struct vmbus_channel *channel,
				      struct vmbus_channel_offer_channel *offer)
{
	/*
	 * Setup state for signalling the host.
	 */
	channel->sig_event = VMBUS_EVENT_CONNECTION_ID;

	if (vmbus_proto_version != VERSION_WS2008) {
		channel->is_dedicated_interrupt =
			(offer->is_dedicated_interrupt != 0);
		channel->sig_event = offer->connection_id;
	}

	memcpy(&channel->offermsg, offer,
	       sizeof(struct vmbus_channel_offer_channel));
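	/*
	 * monitorid is split into a trigger-group index (monitorid / 32) and
	 * a bit position within that 32-bit group (monitorid % 32).
	 */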
	channel->monitor_grp = (u8)offer->monitorid / 32;
	channel->monitor_bit = (u8)offer->monitorid % 32;
	channel->device_id = hv_get_dev_type(channel);
}

/*
 * find_primary_channel_by_offer - Get the channel object given the new offer.
 * This is only used in the resume path of hibernation.
 */
static struct vmbus_channel *
find_primary_channel_by_offer(const struct vmbus_channel_offer_channel *offer)
{
	struct vmbus_channel *channel = NULL, *iter;
	const guid_t *inst1, *inst2;

	/* Ignore sub-channel offers. */
	if (offer->offer.sub_channel_index != 0)
		return NULL;

	mutex_lock(&vmbus_connection.channel_mutex);

	list_for_each_entry(iter, &vmbus_connection.chn_list, listentry) {
		inst1 = &iter->offermsg.offer.if_instance;
		inst2 = &offer->offer.if_instance;

		if (guid_equal(inst1, inst2)) {
			channel = iter;
			break;
		}
	}

	mutex_unlock(&vmbus_connection.channel_mutex);

	return channel;
}

/*
 * vmbus_onoffer - Handler for channel offers from vmbus in parent partition.
 *
 */
static void vmbus_onoffer(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_offer_channel *offer;
	struct vmbus_channel *oldchannel, *newchannel;
	size_t offer_sz;

	offer = (struct vmbus_channel_offer_channel *)hdr;

	trace_vmbus_onoffer(offer);

	oldchannel = find_primary_channel_by_offer(offer);

	if (oldchannel != NULL) {
		/*
		 * We're resuming from hibernation: all the sub-channel and
		 * hv_sock channels we had before the hibernation should have
		 * been cleaned up, and now we must be seeing a re-offered
		 * primary channel that we had before the hibernation.
		 */

		/*
		 * { Initially: channel relid = INVALID_RELID,
		 *		channels[valid_relid] = NULL }
		 *
		 * CPU1					CPU2
		 *
		 * [vmbus_onoffer()]			[vmbus_device_release()]
		 *
		 * LOCK channel_mutex			LOCK channel_mutex
		 * STORE channel relid = valid_relid	LOAD r1 = channel relid
		 * MAP_RELID channel			if (r1 != INVALID_RELID)
		 * UNLOCK channel_mutex			  UNMAP_RELID channel
		 *					UNLOCK channel_mutex
		 *
		 * Forbids: r1 == valid_relid &&
		 *		channels[valid_relid] == channel
		 *
		 * Note.  r1 can be INVALID_RELID only for an hv_sock channel.
		 * None of the hv_sock channels which were present before the
		 * suspend are re-offered upon the resume.  See the WARN_ON()
		 * in hv_process_channel_removal().
		 */
		mutex_lock(&vmbus_connection.channel_mutex);

		atomic_dec(&vmbus_connection.offer_in_progress);

		WARN_ON(oldchannel->offermsg.child_relid != INVALID_RELID);
		/* Fix up the relid. */
		oldchannel->offermsg.child_relid = offer->child_relid;

		offer_sz = sizeof(*offer);
		if (memcmp(offer, &oldchannel->offermsg, offer_sz) != 0) {
			/*
			 * This is not an error, since the host can also change
			 * the other field(s) of the offer, e.g. on WS RS5
			 * (Build 17763), the offer->connection_id of the
			 * Mellanox VF vmbus device can change when the host
			 * reoffers the device upon resume.
			 */
			pr_debug("vmbus offer changed: relid=%d\n",
				 offer->child_relid);

			print_hex_dump_debug("Old vmbus offer: ",
					     DUMP_PREFIX_OFFSET, 16, 4,
					     &oldchannel->offermsg, offer_sz,
					     false);
			print_hex_dump_debug("New vmbus offer: ",
					     DUMP_PREFIX_OFFSET, 16, 4,
					     offer, offer_sz, false);

			/* Fix up the old channel. */
			vmbus_setup_channel_state(oldchannel, offer);
		}

		/* Add the channel back to the array of channels. */
		vmbus_channel_map_relid(oldchannel);
		check_ready_for_resume_event();

		mutex_unlock(&vmbus_connection.channel_mutex);
		return;
	}

	/* Allocate the channel object and save this offer. */
	newchannel = alloc_channel();
	if (!newchannel) {
		vmbus_release_relid(offer->child_relid);
		atomic_dec(&vmbus_connection.offer_in_progress);
		pr_err("Unable to allocate channel object\n");
		return;
	}

	vmbus_setup_channel_state(newchannel, offer);

	vmbus_process_offer(newchannel);
}

static void check_ready_for_suspend_event(void)
{
	/*
	 * If all the sub-channels or hv_sock channels have been cleaned up,
	 * then it's safe to suspend.
	 */
	if (atomic_dec_and_test(&vmbus_connection.nr_chan_close_on_suspend))
		complete(&vmbus_connection.ready_for_suspend_event);
}

/*
 * vmbus_onoffer_rescind - Rescind offer handler.
 *
 * We queue a work item to process this offer synchronously
 */
static void vmbus_onoffer_rescind(struct vmbus_channel_message_header *hdr)
{
	struct vmbus_channel_rescind_offer *rescind;
	struct vmbus_channel *channel;
	struct device *dev;
	bool clean_up_chan_for_suspend;

	rescind = (struct vmbus_channel_rescind_offer *)hdr;

	trace_vmbus_onoffer_rescind(rescind);

	/*
	 * The offer msg and the corresponding rescind msg
	 * from the host are guaranteed to be ordered -
	 * offer comes in first and then the rescind.
	 * Since we process these events in work elements,
	 * and with preemption, we may end up processing
	 * the events out of order.  We rely on the synchronization
	 * provided by offer_in_progress and by channel_mutex for
	 * ordering these events:
	 *
	 * { Initially: offer_in_progress = 1 }
	 *
	 * CPU1				CPU2
	 *
	 * [vmbus_onoffer()]		[vmbus_onoffer_rescind()]
	 *
	 * LOCK channel_mutex		WAIT_ON offer_in_progress == 0
	 * DECREMENT offer_in_progress	LOCK channel_mutex
	 * STORE channels[]		LOAD channels[]
	 * UNLOCK channel_mutex		UNLOCK channel_mutex
	 *
	 * Forbids: CPU2's LOAD from *not* seeing CPU1's STORE
	 */

	while (atomic_read(&vmbus_connection.offer_in_progress) != 0) {
		/*
		 * We wait here until any channel offer is currently
		 * being processed.
		 */
		msleep(1);
	}

	mutex_lock(&vmbus_connection.channel_mutex);
	channel = relid2channel(rescind->child_relid);
	mutex_unlock(&vmbus_connection.channel_mutex);

	if (channel == NULL) {
		/*
		 * We failed in processing the offer message;
		 * we would have cleaned up the relid in that
		 * failure path.
		 */
		return;
	}

	clean_up_chan_for_suspend = is_hvsock_channel(channel) ||
				    is_sub_channel(channel);
	/*
	 * Before setting channel->rescind in vmbus_rescind_cleanup(), we
	 * should make sure the channel callback is not running any more.
	 */
	vmbus_reset_channel_cb(channel);

	/*
	 * Now wait for offer handling to complete.
	 */
	vmbus_rescind_cleanup(channel);
	while (READ_ONCE(channel->probe_done) == false) {
		/*
		 * We wait here until any channel offer is currently
		 * being processed.
		 */
		msleep(1);
	}

	/*
	 * At this point, the rescind handling can proceed safely.
	 */

	if (channel->device_obj) {
		if (channel->chn_rescind_callback) {
			channel->chn_rescind_callback(channel);

			if (clean_up_chan_for_suspend)
				check_ready_for_suspend_event();

			return;
		}
		/*
		 * We will have to unregister this device from the
		 * driver core.
		 */
		dev = get_device(&channel->device_obj->device);
		if (dev) {
			vmbus_device_unregister(channel->device_obj);
			put_device(dev);
		}
	} else if (channel->primary_channel != NULL) {
		/*
		 * Sub-channel is being rescinded. Following is the channel
		 * close sequence when initiated from the driver (refer to
1134*4882a593Smuzhiyun * vmbus_close() for details):
1135*4882a593Smuzhiyun * 1. Close all sub-channels first
1136*4882a593Smuzhiyun * 2. Then close the primary channel.
1137*4882a593Smuzhiyun */
1138*4882a593Smuzhiyun mutex_lock(&vmbus_connection.channel_mutex);
1139*4882a593Smuzhiyun if (channel->state == CHANNEL_OPEN_STATE) {
1140*4882a593Smuzhiyun /*
1141*4882a593Smuzhiyun * The channel is currently not open;
1142*4882a593Smuzhiyun * it is safe for us to cleanup the channel.
1143*4882a593Smuzhiyun */
1144*4882a593Smuzhiyun hv_process_channel_removal(channel);
1145*4882a593Smuzhiyun } else {
1146*4882a593Smuzhiyun complete(&channel->rescind_event);
1147*4882a593Smuzhiyun }
1148*4882a593Smuzhiyun mutex_unlock(&vmbus_connection.channel_mutex);
1149*4882a593Smuzhiyun }
1150*4882a593Smuzhiyun
1151*4882a593Smuzhiyun /* The "channel" may have been freed. Do not access it any longer. */
1152*4882a593Smuzhiyun
1153*4882a593Smuzhiyun if (clean_up_chan_for_suspend)
1154*4882a593Smuzhiyun check_ready_for_suspend_event();
1155*4882a593Smuzhiyun }
1156*4882a593Smuzhiyun
vmbus_hvsock_device_unregister(struct vmbus_channel * channel)1157*4882a593Smuzhiyun void vmbus_hvsock_device_unregister(struct vmbus_channel *channel)
1158*4882a593Smuzhiyun {
1159*4882a593Smuzhiyun BUG_ON(!is_hvsock_channel(channel));
1160*4882a593Smuzhiyun
1161*4882a593Smuzhiyun /* We always get a rescind msg when a connection is closed. */
1162*4882a593Smuzhiyun while (!READ_ONCE(channel->probe_done) || !READ_ONCE(channel->rescind))
1163*4882a593Smuzhiyun msleep(1);
1164*4882a593Smuzhiyun
1165*4882a593Smuzhiyun vmbus_device_unregister(channel->device_obj);
1166*4882a593Smuzhiyun }
1167*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vmbus_hvsock_device_unregister);
1168*4882a593Smuzhiyun
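/*
 * Illustrative sketch (not part of the original file): how an hv_sock-style
 * transport might use vmbus_hvsock_device_unregister() when a connection
 * goes away.  The helper name example_hvsock_release() is hypothetical; the
 * point is only that the call blocks until both probe_done and rescind have
 * been observed, after which the device can be unregistered safely.
 */
#if 0	/* example only */
static void example_hvsock_release(struct vmbus_channel *chan)
{
	/*
	 * Spins (inside vmbus_hvsock_device_unregister()) until the offer
	 * has been fully processed and the host's rescind message has
	 * arrived, then unregisters the hv_sock device object.
	 */
	vmbus_hvsock_device_unregister(chan);
}
#endif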
1169*4882a593Smuzhiyun
1170*4882a593Smuzhiyun /*
1171*4882a593Smuzhiyun * vmbus_onoffers_delivered -
1172*4882a593Smuzhiyun * This is invoked when all offers have been delivered.
1173*4882a593Smuzhiyun *
1174*4882a593Smuzhiyun * Nothing to do here.
1175*4882a593Smuzhiyun */
1176*4882a593Smuzhiyun static void vmbus_onoffers_delivered(
1177*4882a593Smuzhiyun struct vmbus_channel_message_header *hdr)
1178*4882a593Smuzhiyun {
1179*4882a593Smuzhiyun }
1180*4882a593Smuzhiyun
1181*4882a593Smuzhiyun /*
1182*4882a593Smuzhiyun * vmbus_onopen_result - Open result handler.
1183*4882a593Smuzhiyun *
1184*4882a593Smuzhiyun * This is invoked when we receive a response to our channel open request.
1185*4882a593Smuzhiyun * Find the matching request, copy the response and signal the requesting
1186*4882a593Smuzhiyun * thread.
1187*4882a593Smuzhiyun */
1188*4882a593Smuzhiyun static void vmbus_onopen_result(struct vmbus_channel_message_header *hdr)
1189*4882a593Smuzhiyun {
1190*4882a593Smuzhiyun struct vmbus_channel_open_result *result;
1191*4882a593Smuzhiyun struct vmbus_channel_msginfo *msginfo;
1192*4882a593Smuzhiyun struct vmbus_channel_message_header *requestheader;
1193*4882a593Smuzhiyun struct vmbus_channel_open_channel *openmsg;
1194*4882a593Smuzhiyun unsigned long flags;
1195*4882a593Smuzhiyun
1196*4882a593Smuzhiyun result = (struct vmbus_channel_open_result *)hdr;
1197*4882a593Smuzhiyun
1198*4882a593Smuzhiyun trace_vmbus_onopen_result(result);
1199*4882a593Smuzhiyun
1200*4882a593Smuzhiyun /*
1201*4882a593Smuzhiyun * Find the open msg, copy the result and signal/unblock the wait event
1202*4882a593Smuzhiyun */
1203*4882a593Smuzhiyun spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1204*4882a593Smuzhiyun
1205*4882a593Smuzhiyun list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1206*4882a593Smuzhiyun msglistentry) {
1207*4882a593Smuzhiyun requestheader =
1208*4882a593Smuzhiyun (struct vmbus_channel_message_header *)msginfo->msg;
1209*4882a593Smuzhiyun
1210*4882a593Smuzhiyun if (requestheader->msgtype == CHANNELMSG_OPENCHANNEL) {
1211*4882a593Smuzhiyun openmsg =
1212*4882a593Smuzhiyun (struct vmbus_channel_open_channel *)msginfo->msg;
1213*4882a593Smuzhiyun if (openmsg->child_relid == result->child_relid &&
1214*4882a593Smuzhiyun openmsg->openid == result->openid) {
1215*4882a593Smuzhiyun memcpy(&msginfo->response.open_result,
1216*4882a593Smuzhiyun result,
1217*4882a593Smuzhiyun sizeof(
1218*4882a593Smuzhiyun struct vmbus_channel_open_result));
1219*4882a593Smuzhiyun complete(&msginfo->waitevent);
1220*4882a593Smuzhiyun break;
1221*4882a593Smuzhiyun }
1222*4882a593Smuzhiyun }
1223*4882a593Smuzhiyun }
1224*4882a593Smuzhiyun spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1225*4882a593Smuzhiyun }
1226*4882a593Smuzhiyun
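/*
 * Illustrative sketch (an assumption, modeled on the requester side in
 * vmbus_open() in channel.c): how a request is queued so that the handler
 * above can find it, copy the response and complete the waiter.  Error
 * handling is trimmed and example_send_open_request() is a hypothetical
 * helper, not an API of this file.
 */
#if 0	/* example only */
static int example_send_open_request(struct vmbus_channel_open_channel *open_msg,
				     size_t msg_size)
{
	struct vmbus_channel_msginfo *info;
	unsigned long flags;
	int ret;

	info = kzalloc(sizeof(*info) + msg_size, GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	init_completion(&info->waitevent);
	memcpy(info->msg, open_msg, msg_size);

	/* Publish the pending request so vmbus_onopen_result() can match it. */
	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_add_tail(&info->msglistentry, &vmbus_connection.chn_msg_list);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	ret = vmbus_post_msg(info->msg, msg_size, true);
	if (ret == 0)
		/* vmbus_onopen_result() copies the response and completes us. */
		wait_for_completion(&info->waitevent);

	spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
	list_del(&info->msglistentry);
	spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);

	if (ret == 0 && info->response.open_result.status)
		ret = -EAGAIN;

	kfree(info);
	return ret;
}
#endif
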
1227*4882a593Smuzhiyun /*
1228*4882a593Smuzhiyun * vmbus_ongpadl_created - GPADL created handler.
1229*4882a593Smuzhiyun *
1230*4882a593Smuzhiyun * This is invoked when we receive a response to our gpadl create request.
1231*4882a593Smuzhiyun * Find the matching request, copy the response and signal the requesting
1232*4882a593Smuzhiyun * thread.
1233*4882a593Smuzhiyun */
1234*4882a593Smuzhiyun static void vmbus_ongpadl_created(struct vmbus_channel_message_header *hdr)
1235*4882a593Smuzhiyun {
1236*4882a593Smuzhiyun struct vmbus_channel_gpadl_created *gpadlcreated;
1237*4882a593Smuzhiyun struct vmbus_channel_msginfo *msginfo;
1238*4882a593Smuzhiyun struct vmbus_channel_message_header *requestheader;
1239*4882a593Smuzhiyun struct vmbus_channel_gpadl_header *gpadlheader;
1240*4882a593Smuzhiyun unsigned long flags;
1241*4882a593Smuzhiyun
1242*4882a593Smuzhiyun gpadlcreated = (struct vmbus_channel_gpadl_created *)hdr;
1243*4882a593Smuzhiyun
1244*4882a593Smuzhiyun trace_vmbus_ongpadl_created(gpadlcreated);
1245*4882a593Smuzhiyun
1246*4882a593Smuzhiyun /*
1247*4882a593Smuzhiyun * Find the establish msg, copy the result and signal/unblock the wait
1248*4882a593Smuzhiyun * event
1249*4882a593Smuzhiyun */
1250*4882a593Smuzhiyun spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1251*4882a593Smuzhiyun
1252*4882a593Smuzhiyun list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1253*4882a593Smuzhiyun msglistentry) {
1254*4882a593Smuzhiyun requestheader =
1255*4882a593Smuzhiyun (struct vmbus_channel_message_header *)msginfo->msg;
1256*4882a593Smuzhiyun
1257*4882a593Smuzhiyun if (requestheader->msgtype == CHANNELMSG_GPADL_HEADER) {
1258*4882a593Smuzhiyun gpadlheader =
1259*4882a593Smuzhiyun (struct vmbus_channel_gpadl_header *)requestheader;
1260*4882a593Smuzhiyun
1261*4882a593Smuzhiyun if ((gpadlcreated->child_relid ==
1262*4882a593Smuzhiyun gpadlheader->child_relid) &&
1263*4882a593Smuzhiyun (gpadlcreated->gpadl == gpadlheader->gpadl)) {
1264*4882a593Smuzhiyun memcpy(&msginfo->response.gpadl_created,
1265*4882a593Smuzhiyun gpadlcreated,
1266*4882a593Smuzhiyun sizeof(
1267*4882a593Smuzhiyun struct vmbus_channel_gpadl_created));
1268*4882a593Smuzhiyun complete(&msginfo->waitevent);
1269*4882a593Smuzhiyun break;
1270*4882a593Smuzhiyun }
1271*4882a593Smuzhiyun }
1272*4882a593Smuzhiyun }
1273*4882a593Smuzhiyun spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1274*4882a593Smuzhiyun }
1275*4882a593Smuzhiyun
1276*4882a593Smuzhiyun /*
1277*4882a593Smuzhiyun * vmbus_ongpadl_torndown - GPADL torndown handler.
1278*4882a593Smuzhiyun *
1279*4882a593Smuzhiyun * This is invoked when we receive a response to our gpadl teardown request.
1280*4882a593Smuzhiyun * Find the matching request, copy the response and signal the requesting
1281*4882a593Smuzhiyun * thread.
1282*4882a593Smuzhiyun */
1283*4882a593Smuzhiyun static void vmbus_ongpadl_torndown(
1284*4882a593Smuzhiyun struct vmbus_channel_message_header *hdr)
1285*4882a593Smuzhiyun {
1286*4882a593Smuzhiyun struct vmbus_channel_gpadl_torndown *gpadl_torndown;
1287*4882a593Smuzhiyun struct vmbus_channel_msginfo *msginfo;
1288*4882a593Smuzhiyun struct vmbus_channel_message_header *requestheader;
1289*4882a593Smuzhiyun struct vmbus_channel_gpadl_teardown *gpadl_teardown;
1290*4882a593Smuzhiyun unsigned long flags;
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun gpadl_torndown = (struct vmbus_channel_gpadl_torndown *)hdr;
1293*4882a593Smuzhiyun
1294*4882a593Smuzhiyun trace_vmbus_ongpadl_torndown(gpadl_torndown);
1295*4882a593Smuzhiyun
1296*4882a593Smuzhiyun /*
1297*4882a593Smuzhiyun * Find the teardown msg, copy the result and signal/unblock the wait event
1298*4882a593Smuzhiyun */
1299*4882a593Smuzhiyun spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1300*4882a593Smuzhiyun
1301*4882a593Smuzhiyun list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1302*4882a593Smuzhiyun msglistentry) {
1303*4882a593Smuzhiyun requestheader =
1304*4882a593Smuzhiyun (struct vmbus_channel_message_header *)msginfo->msg;
1305*4882a593Smuzhiyun
1306*4882a593Smuzhiyun if (requestheader->msgtype == CHANNELMSG_GPADL_TEARDOWN) {
1307*4882a593Smuzhiyun gpadl_teardown =
1308*4882a593Smuzhiyun (struct vmbus_channel_gpadl_teardown *)requestheader;
1309*4882a593Smuzhiyun
1310*4882a593Smuzhiyun if (gpadl_torndown->gpadl == gpadl_teardown->gpadl) {
1311*4882a593Smuzhiyun memcpy(&msginfo->response.gpadl_torndown,
1312*4882a593Smuzhiyun gpadl_torndown,
1313*4882a593Smuzhiyun sizeof(
1314*4882a593Smuzhiyun struct vmbus_channel_gpadl_torndown));
1315*4882a593Smuzhiyun complete(&msginfo->waitevent);
1316*4882a593Smuzhiyun break;
1317*4882a593Smuzhiyun }
1318*4882a593Smuzhiyun }
1319*4882a593Smuzhiyun }
1320*4882a593Smuzhiyun spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1321*4882a593Smuzhiyun }
1322*4882a593Smuzhiyun
1323*4882a593Smuzhiyun /*
1324*4882a593Smuzhiyun * vmbus_onversion_response - Version response handler
1325*4882a593Smuzhiyun *
1326*4882a593Smuzhiyun * This is invoked when we receive a response to our initiate contact request.
1327*4882a593Smuzhiyun * Find the matching request, copy the response and signal the requesting
1328*4882a593Smuzhiyun * thread.
1329*4882a593Smuzhiyun */
1330*4882a593Smuzhiyun static void vmbus_onversion_response(
1331*4882a593Smuzhiyun struct vmbus_channel_message_header *hdr)
1332*4882a593Smuzhiyun {
1333*4882a593Smuzhiyun struct vmbus_channel_msginfo *msginfo;
1334*4882a593Smuzhiyun struct vmbus_channel_message_header *requestheader;
1335*4882a593Smuzhiyun struct vmbus_channel_version_response *version_response;
1336*4882a593Smuzhiyun unsigned long flags;
1337*4882a593Smuzhiyun
1338*4882a593Smuzhiyun version_response = (struct vmbus_channel_version_response *)hdr;
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun trace_vmbus_onversion_response(version_response);
1341*4882a593Smuzhiyun
1342*4882a593Smuzhiyun spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
1343*4882a593Smuzhiyun
1344*4882a593Smuzhiyun list_for_each_entry(msginfo, &vmbus_connection.chn_msg_list,
1345*4882a593Smuzhiyun msglistentry) {
1346*4882a593Smuzhiyun requestheader =
1347*4882a593Smuzhiyun (struct vmbus_channel_message_header *)msginfo->msg;
1348*4882a593Smuzhiyun
1349*4882a593Smuzhiyun if (requestheader->msgtype ==
1350*4882a593Smuzhiyun CHANNELMSG_INITIATE_CONTACT) {
1351*4882a593Smuzhiyun memcpy(&msginfo->response.version_response,
1352*4882a593Smuzhiyun version_response,
1353*4882a593Smuzhiyun sizeof(struct vmbus_channel_version_response));
1354*4882a593Smuzhiyun complete(&msginfo->waitevent);
1355*4882a593Smuzhiyun }
1356*4882a593Smuzhiyun }
1357*4882a593Smuzhiyun spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags);
1358*4882a593Smuzhiyun }
1359*4882a593Smuzhiyun
1360*4882a593Smuzhiyun /* Channel message dispatch table */
1361*4882a593Smuzhiyun const struct vmbus_channel_message_table_entry
1362*4882a593Smuzhiyun channel_message_table[CHANNELMSG_COUNT] = {
1363*4882a593Smuzhiyun { CHANNELMSG_INVALID, 0, NULL, 0},
1364*4882a593Smuzhiyun { CHANNELMSG_OFFERCHANNEL, 0, vmbus_onoffer,
1365*4882a593Smuzhiyun sizeof(struct vmbus_channel_offer_channel)},
1366*4882a593Smuzhiyun { CHANNELMSG_RESCIND_CHANNELOFFER, 0, vmbus_onoffer_rescind,
1367*4882a593Smuzhiyun sizeof(struct vmbus_channel_rescind_offer) },
1368*4882a593Smuzhiyun { CHANNELMSG_REQUESTOFFERS, 0, NULL, 0},
1369*4882a593Smuzhiyun { CHANNELMSG_ALLOFFERS_DELIVERED, 1, vmbus_onoffers_delivered, 0},
1370*4882a593Smuzhiyun { CHANNELMSG_OPENCHANNEL, 0, NULL, 0},
1371*4882a593Smuzhiyun { CHANNELMSG_OPENCHANNEL_RESULT, 1, vmbus_onopen_result,
1372*4882a593Smuzhiyun sizeof(struct vmbus_channel_open_result)},
1373*4882a593Smuzhiyun { CHANNELMSG_CLOSECHANNEL, 0, NULL, 0},
1374*4882a593Smuzhiyun { CHANNELMSG_GPADL_HEADER, 0, NULL, 0},
1375*4882a593Smuzhiyun { CHANNELMSG_GPADL_BODY, 0, NULL, 0},
1376*4882a593Smuzhiyun { CHANNELMSG_GPADL_CREATED, 1, vmbus_ongpadl_created,
1377*4882a593Smuzhiyun sizeof(struct vmbus_channel_gpadl_created)},
1378*4882a593Smuzhiyun { CHANNELMSG_GPADL_TEARDOWN, 0, NULL, 0},
1379*4882a593Smuzhiyun { CHANNELMSG_GPADL_TORNDOWN, 1, vmbus_ongpadl_torndown,
1380*4882a593Smuzhiyun sizeof(struct vmbus_channel_gpadl_torndown) },
1381*4882a593Smuzhiyun { CHANNELMSG_RELID_RELEASED, 0, NULL, 0},
1382*4882a593Smuzhiyun { CHANNELMSG_INITIATE_CONTACT, 0, NULL, 0},
1383*4882a593Smuzhiyun { CHANNELMSG_VERSION_RESPONSE, 1, vmbus_onversion_response,
1384*4882a593Smuzhiyun sizeof(struct vmbus_channel_version_response)},
1385*4882a593Smuzhiyun { CHANNELMSG_UNLOAD, 0, NULL, 0},
1386*4882a593Smuzhiyun { CHANNELMSG_UNLOAD_RESPONSE, 1, vmbus_unload_response, 0},
1387*4882a593Smuzhiyun { CHANNELMSG_18, 0, NULL, 0},
1388*4882a593Smuzhiyun { CHANNELMSG_19, 0, NULL, 0},
1389*4882a593Smuzhiyun { CHANNELMSG_20, 0, NULL, 0},
1390*4882a593Smuzhiyun { CHANNELMSG_TL_CONNECT_REQUEST, 0, NULL, 0},
1391*4882a593Smuzhiyun { CHANNELMSG_MODIFYCHANNEL, 0, NULL, 0},
1392*4882a593Smuzhiyun { CHANNELMSG_TL_CONNECT_RESULT, 0, NULL, 0},
1393*4882a593Smuzhiyun };
1394*4882a593Smuzhiyun
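/*
 * Illustrative sketch (assumption: a simplified version of the validation
 * that vmbus_on_msg_dpc() in vmbus_drv.c performs before handing a message
 * to vmbus_onmessage()).  example_dispatch() and its payload_size parameter
 * are hypothetical; the min_payload_len field name follows the table entry
 * layout above.
 */
#if 0	/* example only */
static void example_dispatch(struct vmbus_channel_message_header *hdr,
			     u32 payload_size)
{
	const struct vmbus_channel_message_table_entry *entry;

	/* Reject message types that fall outside the dispatch table. */
	if (hdr->msgtype >= CHANNELMSG_COUNT)
		return;

	entry = &channel_message_table[hdr->msgtype];

	/* Skip unhandled message types and undersized payloads. */
	if (!entry->message_handler || payload_size < entry->min_payload_len)
		return;

	/* Only now is it safe to invoke the handler (or vmbus_onmessage()). */
	entry->message_handler(hdr);
}
#endif
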
1395*4882a593Smuzhiyun /*
1396*4882a593Smuzhiyun * vmbus_onmessage - Handler for channel protocol messages.
1397*4882a593Smuzhiyun *
1398*4882a593Smuzhiyun * This is invoked in the vmbus worker thread context.
1399*4882a593Smuzhiyun */
1400*4882a593Smuzhiyun void vmbus_onmessage(struct vmbus_channel_message_header *hdr)
1401*4882a593Smuzhiyun {
1402*4882a593Smuzhiyun trace_vmbus_on_message(hdr);
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun /*
1405*4882a593Smuzhiyun * vmbus_on_msg_dpc() makes sure the hdr->msgtype here cannot go
1406*4882a593Smuzhiyun * out of bounds and that the message_handler pointer cannot be NULL.
1407*4882a593Smuzhiyun */
1408*4882a593Smuzhiyun channel_message_table[hdr->msgtype].message_handler(hdr);
1409*4882a593Smuzhiyun }
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun /*
1412*4882a593Smuzhiyun * vmbus_request_offers - Send a request to get all our pending offers.
1413*4882a593Smuzhiyun */
1414*4882a593Smuzhiyun int vmbus_request_offers(void)
1415*4882a593Smuzhiyun {
1416*4882a593Smuzhiyun struct vmbus_channel_message_header *msg;
1417*4882a593Smuzhiyun struct vmbus_channel_msginfo *msginfo;
1418*4882a593Smuzhiyun int ret;
1419*4882a593Smuzhiyun
1420*4882a593Smuzhiyun msginfo = kmalloc(sizeof(*msginfo) +
1421*4882a593Smuzhiyun sizeof(struct vmbus_channel_message_header),
1422*4882a593Smuzhiyun GFP_KERNEL);
1423*4882a593Smuzhiyun if (!msginfo)
1424*4882a593Smuzhiyun return -ENOMEM;
1425*4882a593Smuzhiyun
1426*4882a593Smuzhiyun msg = (struct vmbus_channel_message_header *)msginfo->msg;
1427*4882a593Smuzhiyun
1428*4882a593Smuzhiyun msg->msgtype = CHANNELMSG_REQUESTOFFERS;
1429*4882a593Smuzhiyun
1430*4882a593Smuzhiyun ret = vmbus_post_msg(msg, sizeof(struct vmbus_channel_message_header),
1431*4882a593Smuzhiyun true);
1432*4882a593Smuzhiyun
1433*4882a593Smuzhiyun trace_vmbus_request_offers(ret);
1434*4882a593Smuzhiyun
1435*4882a593Smuzhiyun if (ret != 0) {
1436*4882a593Smuzhiyun pr_err("Unable to request offers - %d\n", ret);
1437*4882a593Smuzhiyun
1438*4882a593Smuzhiyun goto cleanup;
1439*4882a593Smuzhiyun }
1440*4882a593Smuzhiyun
1441*4882a593Smuzhiyun cleanup:
1442*4882a593Smuzhiyun kfree(msginfo);
1443*4882a593Smuzhiyun
1444*4882a593Smuzhiyun return ret;
1445*4882a593Smuzhiyun }
1446*4882a593Smuzhiyun
1447*4882a593Smuzhiyun static void invoke_sc_cb(struct vmbus_channel *primary_channel)
1448*4882a593Smuzhiyun {
1449*4882a593Smuzhiyun struct list_head *cur, *tmp;
1450*4882a593Smuzhiyun struct vmbus_channel *cur_channel;
1451*4882a593Smuzhiyun
1452*4882a593Smuzhiyun if (primary_channel->sc_creation_callback == NULL)
1453*4882a593Smuzhiyun return;
1454*4882a593Smuzhiyun
1455*4882a593Smuzhiyun list_for_each_safe(cur, tmp, &primary_channel->sc_list) {
1456*4882a593Smuzhiyun cur_channel = list_entry(cur, struct vmbus_channel, sc_list);
1457*4882a593Smuzhiyun
1458*4882a593Smuzhiyun primary_channel->sc_creation_callback(cur_channel);
1459*4882a593Smuzhiyun }
1460*4882a593Smuzhiyun }
1461*4882a593Smuzhiyun
1462*4882a593Smuzhiyun void vmbus_set_sc_create_callback(struct vmbus_channel *primary_channel,
1463*4882a593Smuzhiyun void (*sc_cr_cb)(struct vmbus_channel *new_sc))
1464*4882a593Smuzhiyun {
1465*4882a593Smuzhiyun primary_channel->sc_creation_callback = sc_cr_cb;
1466*4882a593Smuzhiyun }
1467*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vmbus_set_sc_create_callback);
1468*4882a593Smuzhiyun
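/*
 * Illustrative sketch (hypothetical driver code): performance-critical
 * drivers typically register the sub-channel creation callback on the
 * primary channel before asking the device for sub-channels, and open each
 * new sub-channel from the callback.  The names, ring-buffer sizes and the
 * stub channel callback below are made up for the example.
 */
#if 0	/* example only */
static void example_chan_callback(void *context)
{
	/* The driver's per-channel interrupt callback; drain the ring here. */
}

static void example_sc_created(struct vmbus_channel *new_sc)
{
	/* Open the new sub-channel with the same callback as the primary. */
	vmbus_open(new_sc, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
		   example_chan_callback, new_sc);
}

static void example_setup_subchannels(struct vmbus_channel *primary)
{
	vmbus_set_sc_create_callback(primary, example_sc_created);
	/* ...then send the device-specific "create sub-channels" request... */
}
#endif
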
1469*4882a593Smuzhiyun bool vmbus_are_subchannels_present(struct vmbus_channel *primary)
1470*4882a593Smuzhiyun {
1471*4882a593Smuzhiyun bool ret;
1472*4882a593Smuzhiyun
1473*4882a593Smuzhiyun ret = !list_empty(&primary->sc_list);
1474*4882a593Smuzhiyun
1475*4882a593Smuzhiyun if (ret) {
1476*4882a593Smuzhiyun /*
1477*4882a593Smuzhiyun * Invoke the callback on sub-channel creation.
1478*4882a593Smuzhiyun * This will present a uniform interface to the
1479*4882a593Smuzhiyun * clients.
1480*4882a593Smuzhiyun */
1481*4882a593Smuzhiyun invoke_sc_cb(primary);
1482*4882a593Smuzhiyun }
1483*4882a593Smuzhiyun
1484*4882a593Smuzhiyun return ret;
1485*4882a593Smuzhiyun }
1486*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vmbus_are_subchannels_present);
1487*4882a593Smuzhiyun
1488*4882a593Smuzhiyun void vmbus_set_chn_rescind_callback(struct vmbus_channel *channel,
1489*4882a593Smuzhiyun void (*chn_rescind_cb)(struct vmbus_channel *))
1490*4882a593Smuzhiyun {
1491*4882a593Smuzhiyun channel->chn_rescind_callback = chn_rescind_cb;
1492*4882a593Smuzhiyun }
1493*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vmbus_set_chn_rescind_callback);
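
/*
 * Illustrative sketch (hypothetical): components that manage channels
 * directly (for example hv_sock-style transports) can register a rescind
 * callback so that vmbus_onoffer_rescind() above calls their cleanup
 * instead of unregistering a device.  Both helpers below are made up for
 * the example.
 */
#if 0	/* example only */
static void example_on_rescind(struct vmbus_channel *chan)
{
	/*
	 * Invoked from the rescind path: the host has revoked the channel,
	 * so stop submitting work and release per-connection state.
	 */
	pr_info("relid %u rescinded by the host\n",
		chan->offermsg.child_relid);
}

static void example_track_channel(struct vmbus_channel *chan)
{
	vmbus_set_chn_rescind_callback(chan, example_on_rescind);
}
#endif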
1494