/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *
 * Copyright (c) 2011, Microsoft Corporation.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */

#ifndef _HYPERV_VMBUS_H
#define _HYPERV_VMBUS_H

#include <linux/list.h>
#include <linux/bitops.h>
#include <asm/sync_bitops.h>
#include <asm/hyperv-tlfs.h>
#include <linux/atomic.h>
#include <linux/hyperv.h>
#include <linux/interrupt.h>

#include "hv_trace.h"

/*
 * Timeout for services such as KVP and fcopy.
 */
#define HV_UTIL_TIMEOUT 30

/*
 * Timeout for guest-host handshake for services.
 */
#define HV_UTIL_NEGO_TIMEOUT 55


/* Definitions for the monitored notification facility */
union hv_monitor_trigger_group {
	u64 as_uint64;
	struct {
		u32 pending;
		u32 armed;
	};
};

struct hv_monitor_parameter {
	union hv_connection_id connectionid;
	u16 flagnumber;
	u16 rsvdz;
};

union hv_monitor_trigger_state {
	u32 asu32;

	struct {
		u32 group_enable:4;
		u32 rsvdz:28;
	};
};
/* struct hv_monitor_page Layout */
/* ------------------------------------------------------ */
/* | 0   | TriggerState (4 bytes) | Rsvd1 (4 bytes)     | */
/* | 8   | TriggerGroup[0]                              | */
/* | 10  | TriggerGroup[1]                              | */
/* | 18  | TriggerGroup[2]                              | */
/* | 20  | TriggerGroup[3]                              | */
/* | 28  | Rsvd2[0]                                     | */
/* | 30  | Rsvd2[1]                                     | */
/* | 38  | Rsvd2[2]                                     | */
/* | 40  | NextCheckTime[0][0]    | NextCheckTime[0][1] | */
/* | ...                                                | */
/* | 240 | Latency[0][0..3]                             | */
/* | 340 | Rsvd3[0]                                     | */
/* | 440 | Parameter[0][0]                              | */
/* | 448 | Parameter[0][1]                              | */
/* | ...                                                | */
/* | 840 | Rsvd4[0]                                     | */
/* ------------------------------------------------------ */
struct hv_monitor_page {
	union hv_monitor_trigger_state trigger_state;
	u32 rsvdz1;

	union hv_monitor_trigger_group trigger_group[4];
	u64 rsvdz2[3];

	s32 next_checktime[4][32];

	u16 latency[4][32];
	u64 rsvdz3[32];

	struct hv_monitor_parameter parameter[4][32];

	u8 rsvdz4[1984];
};
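
/*
 * Illustrative sketch, not part of the original header: compile-time
 * checks that the field offsets match the layout table above (offsets in
 * hex), assuming the fixed 4096-byte Hyper-V page and
 * <linux/build_bug.h>. The helper name is hypothetical.
 */
static inline void hv_monitor_page_layout_check(void)
{
	BUILD_BUG_ON(offsetof(struct hv_monitor_page, trigger_group) != 0x8);
	BUILD_BUG_ON(offsetof(struct hv_monitor_page, next_checktime) != 0x40);
	BUILD_BUG_ON(offsetof(struct hv_monitor_page, latency) != 0x240);
	BUILD_BUG_ON(offsetof(struct hv_monitor_page, parameter) != 0x440);
	/* rsvdz4 pads the structure out to exactly one 4 KiB page. */
	BUILD_BUG_ON(sizeof(struct hv_monitor_page) != 4096);
}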

#define HV_HYPERCALL_PARAM_ALIGN	sizeof(u64)

/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
	union hv_connection_id connectionid;
	u32 reserved;
	u32 message_type;
	u32 payload_size;
	u64 payload[HV_MESSAGE_PAYLOAD_QWORD_COUNT];
};


enum {
	VMBUS_MESSAGE_CONNECTION_ID	= 1,
	VMBUS_MESSAGE_CONNECTION_ID_4	= 4,
	VMBUS_MESSAGE_PORT_ID		= 1,
	VMBUS_EVENT_CONNECTION_ID	= 2,
	VMBUS_EVENT_PORT_ID		= 2,
	VMBUS_MONITOR_CONNECTION_ID	= 3,
	VMBUS_MONITOR_PORT_ID		= 3,
	VMBUS_MESSAGE_SINT		= 2,
};
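
/*
 * Illustrative sketch, not part of the original header: how the message
 * connection ID is typically chosen. Hosts speaking VERSION_WIN10_V5 or
 * later let the guest post messages on connection ID 4; older hosts
 * require the fixed ID 1 (modeled on the version negotiation in
 * connection.c; the helper name is hypothetical).
 */
static inline u32 vmbus_pick_msg_conn_id(u32 version)
{
	return version >= VERSION_WIN10_V5 ? VMBUS_MESSAGE_CONNECTION_ID_4
					   : VMBUS_MESSAGE_CONNECTION_ID;
}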

/*
 * Per-cpu state for channel handling.
 */
struct hv_per_cpu_context {
	void *synic_message_page;
	void *synic_event_page;
	/*
	 * Buffer to post messages to the host.
	 */
	void *post_msg_page;

	/*
	 * Starting with win8, we can take channel interrupts on any CPU;
	 * we will manage the tasklet that handles event messages on a
	 * per-CPU basis.
	 */
	struct tasklet_struct msg_dpc;
};
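
/*
 * Illustrative sketch, not part of the original header: per-CPU setup as
 * done in hv_synic_alloc() (see hv.c). Each CPU gets its own tasklet so
 * SynIC messages are handled on the CPU that received them. The helper
 * name is hypothetical; vmbus_on_msg_dpc() is declared later in this
 * header.
 */
void vmbus_on_msg_dpc(unsigned long data);

static inline void hv_per_cpu_init_sketch(struct hv_per_cpu_context *hv_cpu)
{
	tasklet_init(&hv_cpu->msg_dpc, vmbus_on_msg_dpc,
		     (unsigned long)hv_cpu);
}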

struct hv_context {
	/*
	 * We only support running on top of Hyper-V, so at this point
	 * this really can only contain the Hyper-V ID.
	 */
	u64 guestid;

	struct hv_per_cpu_context __percpu *cpu_context;

	/*
	 * To manage allocations in a NUMA node.
	 * Array indexed by NUMA node ID.
	 */
	struct cpumask *hv_numa_map;
};

extern struct hv_context hv_context;

/* Hv Interface */

extern int hv_init(void);

extern int hv_post_message(union hv_connection_id connection_id,
			 enum hv_message_type message_type,
			 void *payload, size_t payload_size);

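/*
 * Illustrative sketch, not part of the original header: roughly how
 * hv_post_message() (see hv.c) fills struct hv_input_post_message and
 * issues the hypercall. Assumes hv_do_hypercall() and HVCALL_POST_MESSAGE
 * from asm/mshyperv.h and the hyperv-tlfs headers; includes and error
 * handling are simplified.
 */
static inline int hv_post_message_sketch(union hv_connection_id connection_id,
					 enum hv_message_type message_type,
					 void *payload, size_t payload_size)
{
	struct hv_input_post_message *aligned_msg;
	struct hv_per_cpu_context *hv_cpu;
	u64 status;

	if (payload_size > HV_MESSAGE_PAYLOAD_BYTE_COUNT)
		return -EMSGSIZE;

	/* The per-CPU page is suitably aligned for the hypercall. */
	hv_cpu = get_cpu_ptr(hv_context.cpu_context);
	aligned_msg = hv_cpu->post_msg_page;
	aligned_msg->connectionid = connection_id;
	aligned_msg->reserved = 0;
	aligned_msg->message_type = message_type;
	aligned_msg->payload_size = payload_size;
	memcpy((void *)aligned_msg->payload, payload, payload_size);

	status = hv_do_hypercall(HVCALL_POST_MESSAGE, aligned_msg, NULL);
	put_cpu_ptr(hv_cpu);

	return status & 0xFFFF;	/* low 16 bits carry the HV status code */
}
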
extern int hv_synic_alloc(void);

extern void hv_synic_free(void);

extern void hv_synic_enable_regs(unsigned int cpu);
extern int hv_synic_init(unsigned int cpu);

extern void hv_synic_disable_regs(unsigned int cpu);
extern int hv_synic_cleanup(unsigned int cpu);

/* Interface */

void hv_ringbuffer_pre_init(struct vmbus_channel *channel);

int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info,
		       struct page *pages, u32 pagecnt);

void hv_ringbuffer_cleanup(struct hv_ring_buffer_info *ring_info);

int hv_ringbuffer_write(struct vmbus_channel *channel,
			const struct kvec *kv_list, u32 kv_count);

int hv_ringbuffer_read(struct vmbus_channel *channel,
		       void *buffer, u32 buflen, u32 *buffer_actual_len,
		       u64 *requestid, bool raw);

/*
 * The maximum number of channels (16384) is determined by the size of the
 * interrupt page, which is HV_HYP_PAGE_SIZE. One half of HV_HYP_PAGE_SIZE
 * is used to send endpoint interrupts, and the other half is used to
 * receive endpoint interrupts.
 */
#define MAX_NUM_CHANNELS	((HV_HYP_PAGE_SIZE >> 1) << 3)

/* The value here must be a multiple of 32 */
/* TODO: Need to make this configurable */
#define MAX_NUM_CHANNELS_SUPPORTED	256

#define MAX_CHANNEL_RELIDS					\
	max(MAX_NUM_CHANNELS_SUPPORTED, HV_EVENT_FLAGS_COUNT)
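
/*
 * Illustrative sketch, not part of the original header: a hypothetical
 * compile-time check of the arithmetic above. With the fixed 4096-byte
 * Hyper-V page, half a page of send bits gives (4096 / 2) * 8 = 16384
 * possible channel relids.
 */
static inline void vmbus_channel_limits_check(void)
{
	BUILD_BUG_ON(MAX_NUM_CHANNELS != 16384);
	BUILD_BUG_ON(MAX_NUM_CHANNELS_SUPPORTED % 32 != 0);
}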

enum vmbus_connect_state {
	DISCONNECTED,
	CONNECTING,
	CONNECTED,
	DISCONNECTING
};

#define MAX_SIZE_CHANNEL_MESSAGE	HV_MESSAGE_PAYLOAD_BYTE_COUNT

/*
 * The CPU that Hyper-V will interrupt for VMBUS messages, such as
 * CHANNELMSG_OFFERCHANNEL and CHANNELMSG_RESCIND_CHANNELOFFER.
 */
#define VMBUS_CONNECT_CPU	0

struct vmbus_connection {
	u32 msg_conn_id;

	atomic_t offer_in_progress;

	enum vmbus_connect_state conn_state;

	atomic_t next_gpadl_handle;

	struct completion unload_event;
	/*
	 * Represents channel interrupts. Each bit position represents a
	 * channel. When a channel sends an interrupt via VMBUS, it finds its
	 * bit in the sendInterruptPage, sets it and calls Hv to generate a
	 * port event. The other end receives the port event and parses the
	 * recvInterruptPage to see which bit is set.
	 */
	void *int_page;
	void *send_int_page;
	void *recv_int_page;

	/*
	 * 2 pages - the 1st page is for parent->child notification and
	 * the 2nd is for child->parent notification.
	 */
	struct hv_monitor_page *monitor_pages[2];
	struct list_head chn_msg_list;
	spinlock_t channelmsg_lock;

	/* List of channels */
	struct list_head chn_list;
	struct mutex channel_mutex;

	/* Array of channels */
	struct vmbus_channel **channels;

	/*
	 * An offer message is handled first on the work_queue, and then
	 * is further handled on handle_primary_chan_wq or
	 * handle_sub_chan_wq.
	 */
	struct workqueue_struct *work_queue;
	struct workqueue_struct *handle_primary_chan_wq;
	struct workqueue_struct *handle_sub_chan_wq;

	/*
	 * The number of sub-channels and hv_sock channels that should be
	 * cleaned up upon suspend: sub-channels will be re-created upon
	 * resume, and hv_sock channels should not survive suspend.
	 */
	atomic_t nr_chan_close_on_suspend;
	/*
	 * vmbus_bus_suspend() waits for "nr_chan_close_on_suspend" to
	 * drop to zero.
	 */
	struct completion ready_for_suspend_event;

	/*
	 * The number of primary channels that should be "fixed up"
	 * upon resume: these channels are re-offered upon resume, and some
	 * fields of the channel offers (e.g. child_relid and connection_id)
	 * can change, so the old offermsg must be fixed up before the resume
	 * callbacks of the VSC drivers start to further touch the channels.
	 */
	atomic_t nr_chan_fixup_on_resume;
	/*
	 * vmbus_bus_resume() waits for "nr_chan_fixup_on_resume" to
	 * drop to zero.
	 */
	struct completion ready_for_resume_event;
};


struct vmbus_msginfo {
	/* Bookkeeping stuff */
	struct list_head msglist_entry;

	/* The message itself */
	unsigned char msg[];
};


extern struct vmbus_connection vmbus_connection;

int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo, u32 version);

static inline void vmbus_send_interrupt(u32 relid)
{
	sync_set_bit(relid, vmbus_connection.send_int_page);
}
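
/*
 * Illustrative sketch, not part of the original header: how the send-side
 * bit is consumed, loosely modeled on vmbus_set_event() in connection.c.
 * Setting the relid bit alone is not enough; the host is then poked with
 * a HVCALL_SIGNAL_EVENT hypercall (hv_do_fast_hypercall8() comes from
 * asm/mshyperv.h). Event counting is omitted.
 */
static inline void vmbus_set_event_sketch(struct vmbus_channel *channel)
{
	u32 child_relid = channel->offermsg.child_relid;

	/* Channels with a dedicated interrupt skip the shared bitmap. */
	if (!channel->is_dedicated_interrupt)
		vmbus_send_interrupt(child_relid);

	hv_do_fast_hypercall8(HVCALL_SIGNAL_EVENT, channel->sig_event);
}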

enum vmbus_message_handler_type {
	/* The related handler can sleep. */
	VMHT_BLOCKING = 0,

	/* The related handler must NOT sleep. */
	VMHT_NON_BLOCKING = 1,
};

struct vmbus_channel_message_table_entry {
	enum vmbus_channel_message_type message_type;
	enum vmbus_message_handler_type handler_type;
	void (*message_handler)(struct vmbus_channel_message_header *msg);
	u32 min_payload_len;
};

extern const struct vmbus_channel_message_table_entry
	channel_message_table[CHANNELMSG_COUNT];
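
/*
 * Illustrative sketch, not part of the original header: how the table is
 * consulted in vmbus_on_msg_dpc() (see vmbus_drv.c). VMHT_NON_BLOCKING
 * handlers run directly in tasklet context; VMHT_BLOCKING ones are
 * deferred to a workqueue. Size validation and work-item setup are
 * omitted; the helper name is hypothetical.
 */
static inline void vmbus_dispatch_msg_sketch(struct vmbus_channel_message_header *hdr)
{
	const struct vmbus_channel_message_table_entry *entry;

	if (hdr->msgtype >= CHANNELMSG_COUNT)
		return;

	entry = &channel_message_table[hdr->msgtype];
	if (entry->message_handler && entry->handler_type == VMHT_NON_BLOCKING)
		entry->message_handler(hdr);
	/* VMHT_BLOCKING handlers would be queued to work_queue instead. */
}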


/* General vmbus interface */

struct hv_device *vmbus_device_create(const guid_t *type,
				      const guid_t *instance,
				      struct vmbus_channel *channel);

int vmbus_device_register(struct hv_device *child_device_obj);
void vmbus_device_unregister(struct hv_device *device_obj);
int vmbus_add_channel_kobj(struct hv_device *device_obj,
			   struct vmbus_channel *channel);

void vmbus_remove_channel_attr_group(struct vmbus_channel *channel);

void vmbus_channel_map_relid(struct vmbus_channel *channel);
void vmbus_channel_unmap_relid(struct vmbus_channel *channel);

struct vmbus_channel *relid2channel(u32 relid);

void vmbus_free_channels(void);

/* Connection interface */

int vmbus_connect(void);
void vmbus_disconnect(void);

int vmbus_post_msg(void *buffer, size_t buflen, bool can_sleep);

void vmbus_on_event(unsigned long data);
void vmbus_on_msg_dpc(unsigned long data);

int hv_kvp_init(struct hv_util_service *srv);
void hv_kvp_deinit(void);
int hv_kvp_pre_suspend(void);
int hv_kvp_pre_resume(void);
void hv_kvp_onchannelcallback(void *context);

int hv_vss_init(struct hv_util_service *srv);
void hv_vss_deinit(void);
int hv_vss_pre_suspend(void);
int hv_vss_pre_resume(void);
void hv_vss_onchannelcallback(void *context);

int hv_fcopy_init(struct hv_util_service *srv);
void hv_fcopy_deinit(void);
int hv_fcopy_pre_suspend(void);
int hv_fcopy_pre_resume(void);
void hv_fcopy_onchannelcallback(void *context);
void vmbus_initiate_unload(bool crash);

static inline void hv_poll_channel(struct vmbus_channel *channel,
				   void (*cb)(void *))
{
	if (!channel)
		return;
	cb(channel);
}
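
/*
 * Illustrative usage, not part of the original header: the util drivers
 * use hv_poll_channel() to re-drive a channel callback once userspace has
 * replied (simplified from hv_kvp.c, where the channel comes from the
 * in-flight KVP transaction; the wrapper name is hypothetical).
 */
static inline void hv_kvp_poll_sketch(struct vmbus_channel *recv_channel)
{
	hv_poll_channel(recv_channel, hv_kvp_onchannelcallback);
}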

enum hvutil_device_state {
	HVUTIL_DEVICE_INIT = 0,  /* driver is loaded, waiting for userspace */
	HVUTIL_READY,            /* userspace is registered */
	HVUTIL_HOSTMSG_RECEIVED, /* message from the host was received */
	HVUTIL_USERSPACE_REQ,    /* request to userspace was sent */
	HVUTIL_USERSPACE_RECV,   /* reply from userspace was received */
	HVUTIL_DEVICE_DYING,     /* driver unload is in progress */
};
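
/*
 * Illustrative sketch, not part of the original header: the guard the
 * util drivers apply to host messages (cf. hv_kvp_onchannelcallback() in
 * hv_kvp.c, which bails out when the state is past HVUTIL_READY). A new
 * host message is accepted only while no transaction is in flight; the
 * helper name is hypothetical.
 */
static inline bool hvutil_hostmsg_allowed(enum hvutil_device_state state)
{
	return state <= HVUTIL_READY;
}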

enum delay {
	INTERRUPT_DELAY = 0,
	MESSAGE_DELAY   = 1,
};

extern const struct vmbus_device vmbus_devs[];

static inline bool hv_is_perf_channel(struct vmbus_channel *channel)
{
	return vmbus_devs[channel->device_id].perf_device;
}

static inline bool hv_is_alloced_cpu(unsigned int cpu)
{
	struct vmbus_channel *channel, *sc;

	lockdep_assert_held(&vmbus_connection.channel_mutex);
	/*
	 * List additions/deletions as well as updates of the target CPUs are
	 * protected by channel_mutex.
	 */
	list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) {
		if (!hv_is_perf_channel(channel))
			continue;
		if (channel->target_cpu == cpu)
			return true;
		list_for_each_entry(sc, &channel->sc_list, sc_list) {
			if (sc->target_cpu == cpu)
				return true;
		}
	}
	return false;
}

static inline void hv_set_alloced_cpu(unsigned int cpu)
{
	cpumask_set_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

static inline void hv_clear_alloced_cpu(unsigned int cpu)
{
	/* Another perf channel still targets this CPU; keep it marked. */
	if (hv_is_alloced_cpu(cpu))
		return;
	cpumask_clear_cpu(cpu, &hv_context.hv_numa_map[cpu_to_node(cpu)]);
}

static inline void hv_update_alloced_cpus(unsigned int old_cpu,
					  unsigned int new_cpu)
{
	hv_set_alloced_cpu(new_cpu);
	hv_clear_alloced_cpu(old_cpu);
}
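
/*
 * Illustrative sketch, not part of the original header: how the NUMA map
 * is typically consumed when a perf channel needs a target CPU, loosely
 * modeled on init_vp_index() in channel_mgmt.c (online-mask filtering and
 * other details omitted; needs <linux/topology.h>; the helper name and
 * the fallback are hypothetical).
 */
static inline unsigned int hv_pick_cpu_on_node_sketch(int node)
{
	struct cpumask *alloced_mask = &hv_context.hv_numa_map[node];
	unsigned int cpu;

	/* If every CPU on the node is already taken, start a new round. */
	if (cpumask_weight(alloced_mask) ==
	    cpumask_weight(cpumask_of_node(node)))
		cpumask_clear(alloced_mask);

	for_each_cpu(cpu, cpumask_of_node(node)) {
		if (!cpumask_test_cpu(cpu, alloced_mask)) {
			cpumask_set_cpu(cpu, alloced_mask);
			return cpu;
		}
	}

	return VMBUS_CONNECT_CPU;
}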

#ifdef CONFIG_HYPERV_TESTING

int hv_debug_add_dev_dir(struct hv_device *dev);
void hv_debug_rm_dev_dir(struct hv_device *dev);
void hv_debug_rm_all_dir(void);
int hv_debug_init(void);
void hv_debug_delay_test(struct vmbus_channel *channel, enum delay delay_type);

#else /* CONFIG_HYPERV_TESTING */

static inline void hv_debug_rm_dev_dir(struct hv_device *dev) {}
static inline void hv_debug_rm_all_dir(void) {}
static inline void hv_debug_delay_test(struct vmbus_channel *channel,
				       enum delay delay_type) {}
static inline int hv_debug_init(void)
{
	return -1;
}

static inline int hv_debug_add_dev_dir(struct hv_device *dev)
{
	return -1;
}

#endif /* CONFIG_HYPERV_TESTING */

#endif /* _HYPERV_VMBUS_H */
474