xref: /OK3568_Linux_fs/kernel/net/vmw_vsock/vmci_transport.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>

#include "vmci_transport_notify.h"

static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg);
static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg);
static void vmci_transport_peer_detach_cb(u32 sub_id,
					  const struct vmci_event_data *ed,
					  void *client_data);
static void vmci_transport_recv_pkt_work(struct work_struct *work);
static void vmci_transport_cleanup(struct work_struct *work);
static int vmci_transport_recv_listen(struct sock *sk,
				      struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_server(
					struct sock *sk,
					struct sock *pending,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client_negotiate(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connecting_client_invalid(
					struct sock *sk,
					struct vmci_transport_packet *pkt);
static int vmci_transport_recv_connected(struct sock *sk,
					 struct vmci_transport_packet *pkt);
static bool vmci_transport_old_proto_override(bool *old_pkt_proto);
static u16 vmci_transport_new_proto_supported_versions(void);
static bool vmci_transport_proto_to_notify_struct(struct sock *sk, u16 *proto,
						  bool old_pkt_proto);
static bool vmci_check_transport(struct vsock_sock *vsk);

struct vmci_transport_recv_pkt_info {
	struct work_struct work;
	struct sock *sk;
	struct vmci_transport_packet pkt;
};

static LIST_HEAD(vmci_transport_cleanup_list);
static DEFINE_SPINLOCK(vmci_transport_cleanup_lock);
static DECLARE_WORK(vmci_transport_cleanup_work, vmci_transport_cleanup);

static struct vmci_handle vmci_transport_stream_handle = { VMCI_INVALID_ID,
							   VMCI_INVALID_ID };
static u32 vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;

static int PROTOCOL_OVERRIDE = -1;

/* Helper function to convert from a VMCI error code to a VSock error code. */
static s32 vmci_transport_error_to_vsock_error(s32 vmci_error)
{
	switch (vmci_error) {
	case VMCI_ERROR_NO_MEM:
		return -ENOMEM;
	case VMCI_ERROR_DUPLICATE_ENTRY:
	case VMCI_ERROR_ALREADY_EXISTS:
		return -EADDRINUSE;
	case VMCI_ERROR_NO_ACCESS:
		return -EPERM;
	case VMCI_ERROR_NO_RESOURCES:
		return -ENOBUFS;
	case VMCI_ERROR_INVALID_RESOURCE:
		return -EHOSTUNREACH;
	case VMCI_ERROR_INVALID_ARGS:
	default:
		break;
	}
	return -EINVAL;
}
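
/* A typical use, mirroring __vmci_transport_send_control_pkt() below: send a
 * VMCI datagram and, on failure, hand the caller a normal -errno value
 * instead of a VMCI_ERROR_* code:
 *
 *	err = vmci_datagram_send(&pkt->dg);
 *	if (err < 0)
 *		err = vmci_transport_error_to_vsock_error(err);
 */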

static u32 vmci_transport_peer_rid(u32 peer_cid)
{
	if (VMADDR_CID_HYPERVISOR == peer_cid)
		return VMCI_TRANSPORT_HYPERVISOR_PACKET_RID;

	return VMCI_TRANSPORT_PACKET_RID;
}

static inline void
vmci_transport_packet_init(struct vmci_transport_packet *pkt,
			   struct sockaddr_vm *src,
			   struct sockaddr_vm *dst,
			   u8 type,
			   u64 size,
			   u64 mode,
			   struct vmci_transport_waiting_info *wait,
			   u16 proto,
			   struct vmci_handle handle)
{
	/* We register the stream control handler as an any-CID handle, so we
	 * must always send from a source address of VMADDR_CID_ANY.
	 */
	pkt->dg.src = vmci_make_handle(VMADDR_CID_ANY,
				       VMCI_TRANSPORT_PACKET_RID);
	pkt->dg.dst = vmci_make_handle(dst->svm_cid,
				       vmci_transport_peer_rid(dst->svm_cid));
	pkt->dg.payload_size = sizeof(*pkt) - sizeof(pkt->dg);
	pkt->version = VMCI_TRANSPORT_PACKET_VERSION;
	pkt->type = type;
	pkt->src_port = src->svm_port;
	pkt->dst_port = dst->svm_port;
	memset(&pkt->proto, 0, sizeof(pkt->proto));
	memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2));

	switch (pkt->type) {
	case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
		pkt->u.size = 0;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_REQUEST:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
		pkt->u.size = size;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
	case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
		pkt->u.handle = handle;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_WROTE:
	case VMCI_TRANSPORT_PACKET_TYPE_READ:
	case VMCI_TRANSPORT_PACKET_TYPE_RST:
		pkt->u.size = 0;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
		pkt->u.mode = mode;
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ:
	case VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE:
		memcpy(&pkt->u.wait, wait, sizeof(pkt->u.wait));
		break;

	case VMCI_TRANSPORT_PACKET_TYPE_REQUEST2:
	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
		pkt->u.size = size;
		pkt->proto = proto;
		break;
	}
}
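
/* For reference, which union member vmci_transport_packet_init() fills for
 * each packet type (derived from the switch above):
 *
 *	INVALID, WROTE, READ, RST	u.size = 0
 *	REQUEST, NEGOTIATE		u.size = proposed queue pair size
 *	REQUEST2, NEGOTIATE2		u.size, plus the proto version field
 *	OFFER, ATTACH			u.handle = queue pair handle
 *	SHUTDOWN			u.mode = shutdown mode
 *	WAITING_READ, WAITING_WRITE	u.wait = *wait
 */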

static inline void
vmci_transport_packet_get_addresses(struct vmci_transport_packet *pkt,
				    struct sockaddr_vm *local,
				    struct sockaddr_vm *remote)
{
	vsock_addr_init(local, pkt->dg.dst.context, pkt->dst_port);
	vsock_addr_init(remote, pkt->dg.src.context, pkt->src_port);
}

static int
__vmci_transport_send_control_pkt(struct vmci_transport_packet *pkt,
				  struct sockaddr_vm *src,
				  struct sockaddr_vm *dst,
				  enum vmci_transport_packet_type type,
				  u64 size,
				  u64 mode,
				  struct vmci_transport_waiting_info *wait,
				  u16 proto,
				  struct vmci_handle handle,
				  bool convert_error)
{
	int err;

	vmci_transport_packet_init(pkt, src, dst, type, size, mode, wait,
				   proto, handle);
	err = vmci_datagram_send(&pkt->dg);
	if (convert_error && (err < 0))
		return vmci_transport_error_to_vsock_error(err);

	return err;
}
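
/* The senders below are thin wrappers around
 * __vmci_transport_send_control_pkt() that differ only in where the packet
 * lives:
 *
 *   - vmci_transport_reply_control_pkt_fast() builds the reply in an
 *     on-stack packet;
 *   - vmci_transport_send_control_pkt_bh() reuses a single static packet,
 *     which is safe only because VMCI delivers these from tasklets that
 *     never run concurrently (see the comment in that function);
 *   - vmci_transport_alloc_send_control_pkt() kmallocs the packet with
 *     GFP_KERNEL, for process context where sleeping is allowed.
 */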

static int
vmci_transport_reply_control_pkt_fast(struct vmci_transport_packet *pkt,
				      enum vmci_transport_packet_type type,
				      u64 size,
				      u64 mode,
				      struct vmci_transport_waiting_info *wait,
				      struct vmci_handle handle)
{
	struct vmci_transport_packet reply;
	struct sockaddr_vm src, dst;

	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST) {
		return 0;
	} else {
		vmci_transport_packet_get_addresses(pkt, &src, &dst);
		return __vmci_transport_send_control_pkt(&reply, &src, &dst,
							 type,
							 size, mode, wait,
							 VSOCK_PROTO_INVALID,
							 handle, true);
	}
}

static int
vmci_transport_send_control_pkt_bh(struct sockaddr_vm *src,
				   struct sockaddr_vm *dst,
				   enum vmci_transport_packet_type type,
				   u64 size,
				   u64 mode,
				   struct vmci_transport_waiting_info *wait,
				   struct vmci_handle handle)
{
	/* Note that it is safe to use a single packet across all CPUs since
	 * two tasklets of the same type are guaranteed to not ever run
	 * simultaneously. If that ever changes, or VMCI stops using tasklets,
	 * we can use per-cpu packets.
	 */
	static struct vmci_transport_packet pkt;

	return __vmci_transport_send_control_pkt(&pkt, src, dst, type,
						 size, mode, wait,
						 VSOCK_PROTO_INVALID, handle,
						 false);
}

static int
vmci_transport_alloc_send_control_pkt(struct sockaddr_vm *src,
				      struct sockaddr_vm *dst,
				      enum vmci_transport_packet_type type,
				      u64 size,
				      u64 mode,
				      struct vmci_transport_waiting_info *wait,
				      u16 proto,
				      struct vmci_handle handle)
{
	struct vmci_transport_packet *pkt;
	int err;

	pkt = kmalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return -ENOMEM;

	err = __vmci_transport_send_control_pkt(pkt, src, dst, type, size,
						mode, wait, proto, handle,
						true);
	kfree(pkt);

	return err;
}

static int
vmci_transport_send_control_pkt(struct sock *sk,
				enum vmci_transport_packet_type type,
				u64 size,
				u64 mode,
				struct vmci_transport_waiting_info *wait,
				u16 proto,
				struct vmci_handle handle)
{
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	if (!vsock_addr_bound(&vsk->remote_addr))
		return -EINVAL;

	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr,
						     &vsk->remote_addr,
						     type, size, mode,
						     wait, proto, handle);
}

static int vmci_transport_send_reset_bh(struct sockaddr_vm *dst,
					struct sockaddr_vm *src,
					struct vmci_transport_packet *pkt)
{
	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
		return 0;
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_RST, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_reset(struct sock *sk,
				     struct vmci_transport_packet *pkt)
{
	struct sockaddr_vm *dst_ptr;
	struct sockaddr_vm dst;
	struct vsock_sock *vsk;

	if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST)
		return 0;

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	if (vsock_addr_bound(&vsk->remote_addr)) {
		dst_ptr = &vsk->remote_addr;
	} else {
		vsock_addr_init(&dst, pkt->dg.src.context,
				pkt->src_port);
		dst_ptr = &dst;
	}
	return vmci_transport_alloc_send_control_pkt(&vsk->local_addr, dst_ptr,
					     VMCI_TRANSPORT_PACKET_TYPE_RST,
					     0, 0, NULL, VSOCK_PROTO_INVALID,
					     VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate(struct sock *sk, size_t size)
{
	return vmci_transport_send_control_pkt(
					sk,
					VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE,
					size, 0, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_negotiate2(struct sock *sk, size_t size,
					  u16 version)
{
	return vmci_transport_send_control_pkt(
					sk,
					VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2,
					size, 0, NULL, version,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_qp_offer(struct sock *sk,
					struct vmci_handle handle)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_OFFER, 0,
					0, NULL,
					VSOCK_PROTO_INVALID, handle);
}

static int vmci_transport_send_attach(struct sock *sk,
				      struct vmci_handle handle)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_ATTACH,
					0, 0, NULL, VSOCK_PROTO_INVALID,
					handle);
}

static int vmci_transport_reply_reset(struct vmci_transport_packet *pkt)
{
	return vmci_transport_reply_control_pkt_fast(
						pkt,
						VMCI_TRANSPORT_PACKET_TYPE_RST,
						0, 0, NULL,
						VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_invalid_bh(struct sockaddr_vm *dst,
					  struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_INVALID,
					0, 0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote_bh(struct sockaddr_vm *dst,
				 struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read_bh(struct sockaddr_vm *dst,
				struct sockaddr_vm *src)
{
	return vmci_transport_send_control_pkt_bh(
					dst, src,
					VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
					0, NULL, VMCI_INVALID_HANDLE);
}

int vmci_transport_send_wrote(struct sock *sk)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_WROTE, 0,
					0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

int vmci_transport_send_read(struct sock *sk)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_READ, 0,
					0, NULL, VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_write(struct sock *sk,
				      struct vmci_transport_waiting_info *wait)
{
	return vmci_transport_send_control_pkt(
				sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_WRITE,
				0, 0, wait, VSOCK_PROTO_INVALID,
				VMCI_INVALID_HANDLE);
}

int vmci_transport_send_waiting_read(struct sock *sk,
				     struct vmci_transport_waiting_info *wait)
{
	return vmci_transport_send_control_pkt(
				sk, VMCI_TRANSPORT_PACKET_TYPE_WAITING_READ,
				0, 0, wait, VSOCK_PROTO_INVALID,
				VMCI_INVALID_HANDLE);
}

static int vmci_transport_shutdown(struct vsock_sock *vsk, int mode)
{
	return vmci_transport_send_control_pkt(
					&vsk->sk,
					VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN,
					0, mode, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request(struct sock *sk, size_t size)
{
	return vmci_transport_send_control_pkt(sk,
					VMCI_TRANSPORT_PACKET_TYPE_REQUEST,
					size, 0, NULL,
					VSOCK_PROTO_INVALID,
					VMCI_INVALID_HANDLE);
}

static int vmci_transport_send_conn_request2(struct sock *sk, size_t size,
					     u16 version)
{
	return vmci_transport_send_control_pkt(
					sk, VMCI_TRANSPORT_PACKET_TYPE_REQUEST2,
					size, 0, NULL, version,
					VMCI_INVALID_HANDLE);
}
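
/* Connect-path sketch (illustrative only; the real connect code lives later
 * in this file): a client that speaks the newer protocol advertises every
 * version it supports as a bit mask in a REQUEST2, while a legacy client
 * sends a versionless REQUEST:
 *
 *	if (new_proto)
 *		err = vmci_transport_send_conn_request2(sk, vsk->buffer_size,
 *			vmci_transport_new_proto_supported_versions());
 *	else
 *		err = vmci_transport_send_conn_request(sk, vsk->buffer_size);
 *
 * The listener then picks the highest common version; see
 * vmci_transport_recv_listen() below.
 */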

static struct sock *vmci_transport_get_pending(
					struct sock *listener,
					struct vmci_transport_packet *pkt)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;
	struct sock *pending;
	struct sockaddr_vm src;

	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);

	vlistener = vsock_sk(listener);

	list_for_each_entry(vpending, &vlistener->pending_links,
			    pending_links) {
		if (vsock_addr_equals_addr(&src, &vpending->remote_addr) &&
		    pkt->dst_port == vpending->local_addr.svm_port) {
			pending = sk_vsock(vpending);
			sock_hold(pending);
			goto found;
		}
	}

	pending = NULL;
found:
	return pending;
}

static void vmci_transport_release_pending(struct sock *pending)
{
	sock_put(pending);
}

/* We allow two kinds of sockets to communicate with a restricted VM: 1)
 * trusted sockets 2) sockets from applications running as the same user as the
 * VM (this is only true for the host side and only when using hosted products)
 */

static bool vmci_transport_is_trusted(struct vsock_sock *vsock, u32 peer_cid)
{
	return vsock->trusted ||
	       vmci_is_context_owner(peer_cid, vsock->owner->uid);
}

/* We allow sending datagrams to and receiving datagrams from a restricted VM
 * only if it is trusted as described in vmci_transport_is_trusted.
 */

static bool vmci_transport_allow_dgram(struct vsock_sock *vsock, u32 peer_cid)
{
	if (VMADDR_CID_HYPERVISOR == peer_cid)
		return true;

	if (vsock->cached_peer != peer_cid) {
		vsock->cached_peer = peer_cid;
		if (!vmci_transport_is_trusted(vsock, peer_cid) &&
		    (vmci_context_get_priv_flags(peer_cid) &
		     VMCI_PRIVILEGE_FLAG_RESTRICTED)) {
			vsock->cached_peer_allow_dgram = false;
		} else {
			vsock->cached_peer_allow_dgram = true;
		}
	}

	return vsock->cached_peer_allow_dgram;
}
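
/* Note on the cache above: the trust/privilege lookup runs once per new peer
 * CID and the verdict is memoized in cached_peer_allow_dgram, presumably so
 * the per-datagram fast path does not pay for a privilege-flags lookup on
 * every packet. The cache holds a single peer, so alternating traffic from
 * two different peers would recompute it each time.
 */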

static int
vmci_transport_queue_pair_alloc(struct vmci_qp **qpair,
				struct vmci_handle *handle,
				u64 produce_size,
				u64 consume_size,
				u32 peer, u32 flags, bool trusted)
{
	int err = 0;

	if (trusted) {
		/* Try to allocate our queue pair as trusted. This will only
		 * work if vsock is running in the host.
		 */

		err = vmci_qpair_alloc(qpair, handle, produce_size,
				       consume_size,
				       peer, flags,
				       VMCI_PRIVILEGE_FLAG_TRUSTED);
		if (err != VMCI_ERROR_NO_ACCESS)
			goto out;
	}

	err = vmci_qpair_alloc(qpair, handle, produce_size, consume_size,
			       peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
out:
	if (err < 0) {
		pr_err_once("Could not attach to queue pair with %d\n", err);
		err = vmci_transport_error_to_vsock_error(err);
	}

	return err;
}

static int
vmci_transport_datagram_create_hnd(u32 resource_id,
				   u32 flags,
				   vmci_datagram_recv_cb recv_cb,
				   void *client_data,
				   struct vmci_handle *out_handle)
{
	int err = 0;

	/* Try to allocate our datagram handler as trusted. This will only work
	 * if vsock is running in the host.
	 */

	err = vmci_datagram_create_handle_priv(resource_id, flags,
					       VMCI_PRIVILEGE_FLAG_TRUSTED,
					       recv_cb,
					       client_data, out_handle);

	if (err == VMCI_ERROR_NO_ACCESS)
		err = vmci_datagram_create_handle(resource_id, flags,
						  recv_cb, client_data,
						  out_handle);

	return err;
}

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires.  This is run in bottom-half context and if it ever needs to
 * sleep it should defer that work to a work queue.
 */

static int vmci_transport_recv_dgram_cb(void *data, struct vmci_datagram *dg)
{
	struct sock *sk;
	size_t size;
	struct sk_buff *skb;
	struct vsock_sock *vsk;

	sk = (struct sock *)data;

	/* This handler is privileged when this module is running on the host.
	 * We will get datagrams from all endpoints (even VMs that are in a
	 * restricted context). If we get one from a restricted context then
	 * the destination socket must be trusted.
	 *
	 * NOTE: We access the socket struct without holding the lock here.
	 * This is ok because the field we are interested in is never modified
	 * outside of the create and destruct socket functions.
	 */
	vsk = vsock_sk(sk);
	if (!vmci_transport_allow_dgram(vsk, dg->src.context))
		return VMCI_ERROR_NO_ACCESS;

	size = VMCI_DG_SIZE(dg);

	/* Attach the packet to the socket's receive queue as an sk_buff. */
	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb)
		return VMCI_ERROR_NO_MEM;

	/* sk_receive_skb() will do a sock_put(), so hold here. */
	sock_hold(sk);
	skb_put(skb, size);
	memcpy(skb->data, dg, size);
	sk_receive_skb(sk, skb, 0);

	return VMCI_SUCCESS;
}
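
/* Receive-side note (illustrative sketch): the skb queued above carries the
 * whole datagram, VMCI header included. A consumer (the dgram dequeue path,
 * not shown in this excerpt) would therefore step over the header to reach
 * the payload:
 *
 *	struct vmci_datagram *dg = (struct vmci_datagram *)skb->data;
 *	void *payload = VMCI_DG_PAYLOAD(dg);
 *	size_t payload_len = dg->payload_size;
 */
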
static bool vmci_transport_stream_allow(u32 cid, u32 port)
{
	static const u32 non_socket_contexts[] = {
		VMADDR_CID_LOCAL,
	};
	int i;

	BUILD_BUG_ON(sizeof(cid) != sizeof(*non_socket_contexts));

	for (i = 0; i < ARRAY_SIZE(non_socket_contexts); i++) {
		if (cid == non_socket_contexts[i])
			return false;
	}

	return true;
}

/* This is invoked as part of a tasklet that's scheduled when the VMCI
 * interrupt fires.  This is run in bottom-half context but it defers most of
 * its work to the packet handling work queue.
 */

static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)
{
	struct sock *sk;
	struct sockaddr_vm dst;
	struct sockaddr_vm src;
	struct vmci_transport_packet *pkt;
	struct vsock_sock *vsk;
	bool bh_process_pkt;
	int err;

	sk = NULL;
	err = VMCI_SUCCESS;
	bh_process_pkt = false;

	/* Ignore incoming packets from contexts without sockets, or resources
	 * that aren't vsock implementations.
	 */

	if (!vmci_transport_stream_allow(dg->src.context, -1)
	    || vmci_transport_peer_rid(dg->src.context) != dg->src.resource)
		return VMCI_ERROR_NO_ACCESS;

	if (VMCI_DG_SIZE(dg) < sizeof(*pkt))
		/* Drop datagrams that do not contain full VSock packets. */
		return VMCI_ERROR_INVALID_ARGS;

	pkt = (struct vmci_transport_packet *)dg;

	/* Find the socket that should handle this packet.  First we look for a
	 * connected socket and if there is none we look for a socket bound to
	 * the destination address.
	 */
	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);
	vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port);

	sk = vsock_find_connected_socket(&src, &dst);
	if (!sk) {
		sk = vsock_find_bound_socket(&dst);
		if (!sk) {
			/* We could not find a socket for this specified
			 * address.  If this packet is a RST, we just drop it.
			 * If it is another packet, we send a RST.  Note that
			 * we do not send a RST reply to RSTs so that we do not
			 * continually send RSTs between two endpoints.
			 *
			 * Note that since this is a reply, dst is src and src
			 * is dst.
			 */
			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
				pr_err("unable to send reset\n");

			err = VMCI_ERROR_NOT_FOUND;
			goto out;
		}
	}

	/* If the received packet type is beyond all types known to this
	 * implementation, reply with an invalid message.  Hopefully this will
	 * help when implementing backwards compatibility in the future.
	 */
	if (pkt->type >= VMCI_TRANSPORT_PACKET_TYPE_MAX) {
		vmci_transport_send_invalid_bh(&dst, &src);
		err = VMCI_ERROR_INVALID_ARGS;
		goto out;
	}

	/* This handler is privileged when this module is running on the host.
	 * We will get datagram connect requests from all endpoints (even VMs
	 * that are in a restricted context). If we get one from a restricted
	 * context then the destination socket must be trusted.
	 *
	 * NOTE: We access the socket struct without holding the lock here.
	 * This is ok because the field we are interested in is never modified
	 * outside of the create and destruct socket functions.
	 */
	vsk = vsock_sk(sk);
	if (!vmci_transport_allow_dgram(vsk, pkt->dg.src.context)) {
		err = VMCI_ERROR_NO_ACCESS;
		goto out;
	}

	/* We do almost everything in a work queue, but let's fast path the
	 * notification of reads and writes to help data transfer performance.
	 * We can only do this if there is no process context code executing
	 * for this socket since that may change the state.
	 */
	bh_lock_sock(sk);

	if (!sock_owned_by_user(sk)) {
		/* The local context ID may be out of date, update it. */
		vsk->local_addr.svm_cid = dst.svm_cid;

		if (sk->sk_state == TCP_ESTABLISHED)
			vmci_trans(vsk)->notify_ops->handle_notify_pkt(
					sk, pkt, true, &dst, &src,
					&bh_process_pkt);
	}

	bh_unlock_sock(sk);

	if (!bh_process_pkt) {
		struct vmci_transport_recv_pkt_info *recv_pkt_info;

		recv_pkt_info = kmalloc(sizeof(*recv_pkt_info), GFP_ATOMIC);
		if (!recv_pkt_info) {
			if (vmci_transport_send_reset_bh(&dst, &src, pkt) < 0)
				pr_err("unable to send reset\n");

			err = VMCI_ERROR_NO_MEM;
			goto out;
		}

		recv_pkt_info->sk = sk;
		memcpy(&recv_pkt_info->pkt, pkt, sizeof(recv_pkt_info->pkt));
		INIT_WORK(&recv_pkt_info->work, vmci_transport_recv_pkt_work);

		schedule_work(&recv_pkt_info->work);
		/* Clear sk so that the reference count incremented by one of
		 * the Find functions above is not decremented below.  We need
		 * that reference count for the packet handler we've scheduled
		 * to run.
		 */
		sk = NULL;
	}

out:
	if (sk)
		sock_put(sk);

	return err;
}
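
/* To summarize the receive path above: READ/WROTE notifications for an
 * established, un-owned socket are handled directly in bottom-half context
 * (bh_process_pkt == true); everything else is copied into a
 * vmci_transport_recv_pkt_info and deferred to
 * vmci_transport_recv_pkt_work(), which inherits the socket reference taken
 * by the find functions and drops it when done.
 */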

static void vmci_transport_handle_detach(struct sock *sk)
{
	struct vsock_sock *vsk;

	vsk = vsock_sk(sk);
	if (!vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)) {
		sock_set_flag(sk, SOCK_DONE);

		/* On a detach the peer will not be sending or receiving
		 * anymore.
		 */
		vsk->peer_shutdown = SHUTDOWN_MASK;

		/* We should not be sending anymore since the peer won't be
		 * there to receive, but we can still receive if there is data
		 * left in our consume queue. If the local endpoint is a host,
		 * we can't call vsock_stream_has_data, since that may block,
		 * but a host endpoint can't read data once the VM has
		 * detached, so there is no available data in that case.
		 */
		if (vsk->local_addr.svm_cid == VMADDR_CID_HOST ||
		    vsock_stream_has_data(vsk) <= 0) {
			if (sk->sk_state == TCP_SYN_SENT) {
				/* The peer may detach from a queue pair while
				 * we are still in the connecting state, i.e.,
				 * if the peer VM is killed after attaching to
				 * a queue pair, but before we complete the
				 * handshake. In that case, we treat the detach
				 * event like a reset.
				 */

				sk->sk_state = TCP_CLOSE;
				sk->sk_err = ECONNRESET;
				sk->sk_error_report(sk);
				return;
			}
			sk->sk_state = TCP_CLOSE;
		}
		sk->sk_state_change(sk);
	}
}

static void vmci_transport_peer_detach_cb(u32 sub_id,
					  const struct vmci_event_data *e_data,
					  void *client_data)
{
	struct vmci_transport *trans = client_data;
	const struct vmci_event_payload_qp *e_payload;

	e_payload = vmci_event_data_const_payload(e_data);

	/* XXX This is lame, we should provide a way to lookup sockets by
	 * qp_handle.
	 */
	if (vmci_handle_is_invalid(e_payload->handle) ||
	    !vmci_handle_is_equal(trans->qp_handle, e_payload->handle))
		return;

	/* We don't ask for delayed CBs when we subscribe to this event (we
	 * pass 0 as flags to vmci_event_subscribe()).  VMCI makes no
	 * guarantees in that case about what context we might be running in,
	 * so it could be BH or process, blockable or non-blockable.  So we
	 * need to account for all possible contexts here.
	 */
	spin_lock_bh(&trans->lock);
	if (!trans->sk)
		goto out;

	/* Apart from here, trans->lock is only grabbed as part of sk destruct,
	 * where trans->sk isn't locked.
	 */
	bh_lock_sock(trans->sk);

	vmci_transport_handle_detach(trans->sk);

	bh_unlock_sock(trans->sk);
 out:
	spin_unlock_bh(&trans->lock);
}

static void vmci_transport_qp_resumed_cb(u32 sub_id,
					 const struct vmci_event_data *e_data,
					 void *client_data)
{
	vsock_for_each_connected_socket(vmci_transport_handle_detach);
}
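
/* A note on the resume callback above: when a VM is suspended and later
 * resumed, its queue pair attachments are not preserved across the
 * transition, so every connected socket is walked and given the same
 * treatment as an explicit peer detach.
 */
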
static void vmci_transport_recv_pkt_work(struct work_struct *work)
{
	struct vmci_transport_recv_pkt_info *recv_pkt_info;
	struct vmci_transport_packet *pkt;
	struct sock *sk;

	recv_pkt_info =
		container_of(work, struct vmci_transport_recv_pkt_info, work);
	sk = recv_pkt_info->sk;
	pkt = &recv_pkt_info->pkt;

	lock_sock(sk);

	/* The local context ID may be out of date. */
	vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context;

	switch (sk->sk_state) {
	case TCP_LISTEN:
		vmci_transport_recv_listen(sk, pkt);
		break;
	case TCP_SYN_SENT:
		/* Processing of pending connections for servers goes through
		 * the listening socket, so see vmci_transport_recv_listen()
		 * for that path.
		 */
		vmci_transport_recv_connecting_client(sk, pkt);
		break;
	case TCP_ESTABLISHED:
		vmci_transport_recv_connected(sk, pkt);
		break;
	default:
		/* Because this function does not run in the same context as
		 * vmci_transport_recv_stream_cb it is possible that the
		 * socket has closed. We need to let the other side know or it
		 * could be sitting in a connect and hang forever. Send a
		 * reset to prevent that.
		 */
		vmci_transport_send_reset(sk, pkt);
		break;
	}

	release_sock(sk);
	kfree(recv_pkt_info);
	/* Release reference obtained in the stream callback when we fetched
	 * this socket out of the bound or connected list.
	 */
	sock_put(sk);
}

static int vmci_transport_recv_listen(struct sock *sk,
				      struct vmci_transport_packet *pkt)
{
	struct sock *pending;
	struct vsock_sock *vpending;
	int err;
	u64 qp_size;
	bool old_request = false;
	bool old_pkt_proto = false;

	err = 0;

	/* Because we are in the listen state, we could be receiving a packet
	 * for ourselves or any previous connection requests that we received.
	 * If it's the latter, we try to find a socket in our list of pending
	 * connections and, if we do, call the appropriate handler for the
	 * state that socket is in.  Otherwise we try to service the
	 * connection request.
	 */
	pending = vmci_transport_get_pending(sk, pkt);
	if (pending) {
		lock_sock(pending);

		/* The local context ID may be out of date. */
		vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context;

		switch (pending->sk_state) {
		case TCP_SYN_SENT:
			err = vmci_transport_recv_connecting_server(sk,
								    pending,
								    pkt);
			break;
		default:
			vmci_transport_send_reset(pending, pkt);
			err = -EINVAL;
		}

		if (err < 0)
			vsock_remove_pending(sk, pending);

		release_sock(pending);
		vmci_transport_release_pending(pending);

		return err;
	}

	/* The listen state only accepts connection requests.  Reply with a
	 * reset unless we received a reset.
	 */

	if (!(pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST ||
	      pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)) {
		vmci_transport_reply_reset(pkt);
		return -EINVAL;
	}

	if (pkt->u.size == 0) {
		vmci_transport_reply_reset(pkt);
		return -EINVAL;
	}

	/* If this socket can't accommodate this connection request, we send a
	 * reset.  Otherwise we create and initialize a child socket and reply
	 * with a connection negotiation.
	 */
	if (sk->sk_ack_backlog >= sk->sk_max_ack_backlog) {
		vmci_transport_reply_reset(pkt);
		return -ECONNREFUSED;
	}

	pending = vsock_create_connected(sk);
	if (!pending) {
		vmci_transport_send_reset(sk, pkt);
		return -ENOMEM;
	}

	vpending = vsock_sk(pending);

	vsock_addr_init(&vpending->local_addr, pkt->dg.dst.context,
			pkt->dst_port);
	vsock_addr_init(&vpending->remote_addr, pkt->dg.src.context,
			pkt->src_port);

	err = vsock_assign_transport(vpending, vsock_sk(sk));
	/* The transport assigned (chosen by looking at remote_addr) must be
	 * the same one on which we received the request.
	 */
	if (err || !vmci_check_transport(vpending)) {
		vmci_transport_send_reset(sk, pkt);
		sock_put(pending);
		return err;
	}

	/* If the proposed size fits within our min/max, accept it. Otherwise
	 * propose our own size.
	 */
	if (pkt->u.size >= vpending->buffer_min_size &&
	    pkt->u.size <= vpending->buffer_max_size) {
		qp_size = pkt->u.size;
	} else {
		qp_size = vpending->buffer_size;
	}
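
	/* Worked example (numbers illustrative only): with buffer_min_size =
	 * 8 KiB, buffer_max_size = 256 KiB and buffer_size = 64 KiB, a peer
	 * proposing 128 KiB is taken at its word (qp_size = 128 KiB), while a
	 * peer proposing 1 MiB is countered with our default of 64 KiB in the
	 * NEGOTIATE reply.
	 */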
1039*4882a593Smuzhiyun 
1040*4882a593Smuzhiyun 	/* Figure out if we are using old or new requests based on the
1041*4882a593Smuzhiyun 	 * overrides pkt types sent by our peer.
1042*4882a593Smuzhiyun 	 */
1043*4882a593Smuzhiyun 	if (vmci_transport_old_proto_override(&old_pkt_proto)) {
1044*4882a593Smuzhiyun 		old_request = old_pkt_proto;
1045*4882a593Smuzhiyun 	} else {
1046*4882a593Smuzhiyun 		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST)
1047*4882a593Smuzhiyun 			old_request = true;
1048*4882a593Smuzhiyun 		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_REQUEST2)
1049*4882a593Smuzhiyun 			old_request = false;
1050*4882a593Smuzhiyun 
1051*4882a593Smuzhiyun 	}
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun 	if (old_request) {
1054*4882a593Smuzhiyun 		/* Handle a REQUEST (or override) */
1055*4882a593Smuzhiyun 		u16 version = VSOCK_PROTO_INVALID;
1056*4882a593Smuzhiyun 		if (vmci_transport_proto_to_notify_struct(
1057*4882a593Smuzhiyun 			pending, &version, true))
1058*4882a593Smuzhiyun 			err = vmci_transport_send_negotiate(pending, qp_size);
1059*4882a593Smuzhiyun 		else
1060*4882a593Smuzhiyun 			err = -EINVAL;
1061*4882a593Smuzhiyun 
1062*4882a593Smuzhiyun 	} else {
1063*4882a593Smuzhiyun 		/* Handle a REQUEST2 (or override) */
1064*4882a593Smuzhiyun 		int proto_int = pkt->proto;
1065*4882a593Smuzhiyun 		int pos;
1066*4882a593Smuzhiyun 		u16 active_proto_version = 0;
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 		/* The list of possible protocols is the intersection of the
1069*4882a593Smuzhiyun 		 * protocols the client supports and the protocols we
1070*4882a593Smuzhiyun 		 * support.
1071*4882a593Smuzhiyun 		 */
1072*4882a593Smuzhiyun 		proto_int &= vmci_transport_new_proto_supported_versions();
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 		/* We choose the highest possible protocol version and use that
1075*4882a593Smuzhiyun 		 * one.
1076*4882a593Smuzhiyun 		 */
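		/* Example: if the masked set is 0x6 (bits 1 and 2 are common),
		 * fls() returns 3 and we pick 1 << 2 == 0x4, the highest
		 * common version bit.
		 */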
1077*4882a593Smuzhiyun 		pos = fls(proto_int);
1078*4882a593Smuzhiyun 		if (pos) {
1079*4882a593Smuzhiyun 			active_proto_version = (1 << (pos - 1));
1080*4882a593Smuzhiyun 			if (vmci_transport_proto_to_notify_struct(
1081*4882a593Smuzhiyun 				pending, &active_proto_version, false))
1082*4882a593Smuzhiyun 				err = vmci_transport_send_negotiate2(pending,
1083*4882a593Smuzhiyun 							qp_size,
1084*4882a593Smuzhiyun 							active_proto_version);
1085*4882a593Smuzhiyun 			else
1086*4882a593Smuzhiyun 				err = -EINVAL;
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 		} else {
1089*4882a593Smuzhiyun 			err = -EINVAL;
1090*4882a593Smuzhiyun 		}
1091*4882a593Smuzhiyun 	}
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 	if (err < 0) {
1094*4882a593Smuzhiyun 		vmci_transport_send_reset(sk, pkt);
1095*4882a593Smuzhiyun 		sock_put(pending);
1096*4882a593Smuzhiyun 		err = vmci_transport_error_to_vsock_error(err);
1097*4882a593Smuzhiyun 		goto out;
1098*4882a593Smuzhiyun 	}
1099*4882a593Smuzhiyun 
1100*4882a593Smuzhiyun 	vsock_add_pending(sk, pending);
1101*4882a593Smuzhiyun 	sk_acceptq_added(sk);
1102*4882a593Smuzhiyun 
1103*4882a593Smuzhiyun 	pending->sk_state = TCP_SYN_SENT;
1104*4882a593Smuzhiyun 	vmci_trans(vpending)->produce_size =
1105*4882a593Smuzhiyun 		vmci_trans(vpending)->consume_size = qp_size;
1106*4882a593Smuzhiyun 	vpending->buffer_size = qp_size;
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 	vmci_trans(vpending)->notify_ops->process_request(pending);
1109*4882a593Smuzhiyun 
1110*4882a593Smuzhiyun 	/* We might never receive another message for this socket and it's not
1111*4882a593Smuzhiyun 	 * connected to any process, so we have to ensure it gets cleaned up
1112*4882a593Smuzhiyun 	 * ourselves.  Our delayed work function will take care of that.  Note
1113*4882a593Smuzhiyun 	 * that we do not ever cancel this function since we have few
1114*4882a593Smuzhiyun 	 * guarantees about its state when calling cancel_delayed_work().
1115*4882a593Smuzhiyun 	 * Instead we hold a reference on the socket for that function and make
1116*4882a593Smuzhiyun 	 * it capable of handling cases where it needs to do nothing but
1117*4882a593Smuzhiyun 	 * release that reference.
1118*4882a593Smuzhiyun 	 */
1119*4882a593Smuzhiyun 	vpending->listener = sk;
1120*4882a593Smuzhiyun 	sock_hold(sk);
1121*4882a593Smuzhiyun 	sock_hold(pending);
1122*4882a593Smuzhiyun 	schedule_delayed_work(&vpending->pending_work, HZ);
1123*4882a593Smuzhiyun 
1124*4882a593Smuzhiyun out:
1125*4882a593Smuzhiyun 	return err;
1126*4882a593Smuzhiyun }
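/* Server-side handshake overview: the listener above answers a
 * REQUEST/REQUEST2 with a NEGOTIATE/NEGOTIATE2; the client then allocates
 * the queue pair and sends an OFFER, which the function below attaches to
 * and acknowledges with an ATTACH, completing the connection.
 */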
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun static int
1129*4882a593Smuzhiyun vmci_transport_recv_connecting_server(struct sock *listener,
1130*4882a593Smuzhiyun 				      struct sock *pending,
1131*4882a593Smuzhiyun 				      struct vmci_transport_packet *pkt)
1132*4882a593Smuzhiyun {
1133*4882a593Smuzhiyun 	struct vsock_sock *vpending;
1134*4882a593Smuzhiyun 	struct vmci_handle handle;
1135*4882a593Smuzhiyun 	struct vmci_qp *qpair;
1136*4882a593Smuzhiyun 	bool is_local;
1137*4882a593Smuzhiyun 	u32 flags;
1138*4882a593Smuzhiyun 	u32 detach_sub_id;
1139*4882a593Smuzhiyun 	int err;
1140*4882a593Smuzhiyun 	int skerr;
1141*4882a593Smuzhiyun 
1142*4882a593Smuzhiyun 	vpending = vsock_sk(pending);
1143*4882a593Smuzhiyun 	detach_sub_id = VMCI_INVALID_ID;
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	switch (pkt->type) {
1146*4882a593Smuzhiyun 	case VMCI_TRANSPORT_PACKET_TYPE_OFFER:
1147*4882a593Smuzhiyun 		if (vmci_handle_is_invalid(pkt->u.handle)) {
1148*4882a593Smuzhiyun 			vmci_transport_send_reset(pending, pkt);
1149*4882a593Smuzhiyun 			skerr = EPROTO;
1150*4882a593Smuzhiyun 			err = -EINVAL;
1151*4882a593Smuzhiyun 			goto destroy;
1152*4882a593Smuzhiyun 		}
1153*4882a593Smuzhiyun 		break;
1154*4882a593Smuzhiyun 	default:
1155*4882a593Smuzhiyun 		/* Close and clean up the connection. */
1156*4882a593Smuzhiyun 		vmci_transport_send_reset(pending, pkt);
1157*4882a593Smuzhiyun 		skerr = EPROTO;
1158*4882a593Smuzhiyun 		err = pkt->type == VMCI_TRANSPORT_PACKET_TYPE_RST ? 0 : -EINVAL;
1159*4882a593Smuzhiyun 		goto destroy;
1160*4882a593Smuzhiyun 	}
1161*4882a593Smuzhiyun 
1162*4882a593Smuzhiyun 	/* In order to complete the connection we need to attach to the offered
1163*4882a593Smuzhiyun 	 * queue pair and send an attach notification.  We also subscribe to the
1164*4882a593Smuzhiyun 	 * detach event so we know when our peer goes away, and we do that
1165*4882a593Smuzhiyun 	 * before attaching so we don't miss an event.  If all this succeeds,
1166*4882a593Smuzhiyun 	 * we update our state and wake up anything waiting in accept() for a
1167*4882a593Smuzhiyun 	 * connection.
1168*4882a593Smuzhiyun 	 */
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun 	/* We don't care about attach since we ensure the other side has
1171*4882a593Smuzhiyun 	 * attached by specifying the ATTACH_ONLY flag below.
1172*4882a593Smuzhiyun 	 */
1173*4882a593Smuzhiyun 	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
1174*4882a593Smuzhiyun 				   vmci_transport_peer_detach_cb,
1175*4882a593Smuzhiyun 				   vmci_trans(vpending), &detach_sub_id);
1176*4882a593Smuzhiyun 	if (err < VMCI_SUCCESS) {
1177*4882a593Smuzhiyun 		vmci_transport_send_reset(pending, pkt);
1178*4882a593Smuzhiyun 		err = vmci_transport_error_to_vsock_error(err);
1179*4882a593Smuzhiyun 		skerr = -err;
1180*4882a593Smuzhiyun 		goto destroy;
1181*4882a593Smuzhiyun 	}
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	vmci_trans(vpending)->detach_sub_id = detach_sub_id;
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	/* Now attach to the queue pair the client created. */
1186*4882a593Smuzhiyun 	handle = pkt->u.handle;
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	/* vpending->local_addr always has a context id so we do not need to
1189*4882a593Smuzhiyun 	 * worry about VMADDR_CID_ANY in this case.
1190*4882a593Smuzhiyun 	 */
1191*4882a593Smuzhiyun 	is_local =
1192*4882a593Smuzhiyun 	    vpending->remote_addr.svm_cid == vpending->local_addr.svm_cid;
1193*4882a593Smuzhiyun 	flags = VMCI_QPFLAG_ATTACH_ONLY;
1194*4882a593Smuzhiyun 	flags |= is_local ? VMCI_QPFLAG_LOCAL : 0;
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	err = vmci_transport_queue_pair_alloc(
1197*4882a593Smuzhiyun 					&qpair,
1198*4882a593Smuzhiyun 					&handle,
1199*4882a593Smuzhiyun 					vmci_trans(vpending)->produce_size,
1200*4882a593Smuzhiyun 					vmci_trans(vpending)->consume_size,
1201*4882a593Smuzhiyun 					pkt->dg.src.context,
1202*4882a593Smuzhiyun 					flags,
1203*4882a593Smuzhiyun 					vmci_transport_is_trusted(
1204*4882a593Smuzhiyun 						vpending,
1205*4882a593Smuzhiyun 						vpending->remote_addr.svm_cid));
1206*4882a593Smuzhiyun 	if (err < 0) {
1207*4882a593Smuzhiyun 		vmci_transport_send_reset(pending, pkt);
1208*4882a593Smuzhiyun 		skerr = -err;
1209*4882a593Smuzhiyun 		goto destroy;
1210*4882a593Smuzhiyun 	}
1211*4882a593Smuzhiyun 
1212*4882a593Smuzhiyun 	vmci_trans(vpending)->qp_handle = handle;
1213*4882a593Smuzhiyun 	vmci_trans(vpending)->qpair = qpair;
1214*4882a593Smuzhiyun 
1215*4882a593Smuzhiyun 	/* When we send the attach message, we must be ready to handle incoming
1216*4882a593Smuzhiyun 	 * control messages on the newly connected socket. So we move the
1217*4882a593Smuzhiyun 	 * pending socket to the connected state before sending the attach
1218*4882a593Smuzhiyun 	 * message. Otherwise, an incoming packet triggered by the attach being
1219*4882a593Smuzhiyun 	 * received by the peer may be processed concurrently with what happens
1220*4882a593Smuzhiyun 	 * below after sending the attach message, and that incoming packet
1221*4882a593Smuzhiyun 	 * will find the listening socket instead of the (currently) pending
1222*4882a593Smuzhiyun 	 * socket. Note that enqueueing the socket increments the reference
1223*4882a593Smuzhiyun 	 * count, so even if a reset comes before the connection is accepted,
1224*4882a593Smuzhiyun 	 * the socket will be valid until it is removed from the queue.
1225*4882a593Smuzhiyun 	 *
1226*4882a593Smuzhiyun 	 * If we fail sending the attach below, we remove the socket from the
1227*4882a593Smuzhiyun 	 * connected list and move the socket to TCP_CLOSE before
1228*4882a593Smuzhiyun 	 * releasing the lock, so a pending slow path processing of an incoming
1229*4882a593Smuzhiyun 	 * packet will not see the socket in the connected state in that case.
1230*4882a593Smuzhiyun 	 */
1231*4882a593Smuzhiyun 	pending->sk_state = TCP_ESTABLISHED;
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 	vsock_insert_connected(vpending);
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 	/* Notify our peer of our attach. */
1236*4882a593Smuzhiyun 	err = vmci_transport_send_attach(pending, handle);
1237*4882a593Smuzhiyun 	if (err < 0) {
1238*4882a593Smuzhiyun 		vsock_remove_connected(vpending);
1239*4882a593Smuzhiyun 		pr_err("Could not send attach\n");
1240*4882a593Smuzhiyun 		vmci_transport_send_reset(pending, pkt);
1241*4882a593Smuzhiyun 		err = vmci_transport_error_to_vsock_error(err);
1242*4882a593Smuzhiyun 		skerr = -err;
1243*4882a593Smuzhiyun 		goto destroy;
1244*4882a593Smuzhiyun 	}
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 	/* We have a connection. Move the now connected socket from the
1247*4882a593Smuzhiyun 	 * listener's pending list to the accept queue so callers of accept()
1248*4882a593Smuzhiyun 	 * can find it.
1249*4882a593Smuzhiyun 	 */
1250*4882a593Smuzhiyun 	vsock_remove_pending(listener, pending);
1251*4882a593Smuzhiyun 	vsock_enqueue_accept(listener, pending);
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 	/* Callers of accept() will be waiting on the listening socket, not
1254*4882a593Smuzhiyun 	 * the pending socket.
1255*4882a593Smuzhiyun 	 */
1256*4882a593Smuzhiyun 	listener->sk_data_ready(listener);
1257*4882a593Smuzhiyun 
1258*4882a593Smuzhiyun 	return 0;
1259*4882a593Smuzhiyun 
1260*4882a593Smuzhiyun destroy:
1261*4882a593Smuzhiyun 	pending->sk_err = skerr;
1262*4882a593Smuzhiyun 	pending->sk_state = TCP_CLOSE;
1263*4882a593Smuzhiyun 	/* As long as we drop our reference, all necessary cleanup will happen
1264*4882a593Smuzhiyun 	 * when the cleanup function drops its reference and our destruct
1265*4882a593Smuzhiyun 	 * implementation is called.  Note that since the listen handler will
1266*4882a593Smuzhiyun 	 * remove pending from the pending list upon our failure, the cleanup
1267*4882a593Smuzhiyun 	 * function won't drop the additional reference, which is why we do it
1268*4882a593Smuzhiyun 	 * here.
1269*4882a593Smuzhiyun 	 */
1270*4882a593Smuzhiyun 	sock_put(pending);
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 	return err;
1273*4882a593Smuzhiyun }
1274*4882a593Smuzhiyun 
1275*4882a593Smuzhiyun static int
1276*4882a593Smuzhiyun vmci_transport_recv_connecting_client(struct sock *sk,
1277*4882a593Smuzhiyun 				      struct vmci_transport_packet *pkt)
1278*4882a593Smuzhiyun {
1279*4882a593Smuzhiyun 	struct vsock_sock *vsk;
1280*4882a593Smuzhiyun 	int err;
1281*4882a593Smuzhiyun 	int skerr;
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	vsk = vsock_sk(sk);
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	switch (pkt->type) {
1286*4882a593Smuzhiyun 	case VMCI_TRANSPORT_PACKET_TYPE_ATTACH:
1287*4882a593Smuzhiyun 		if (vmci_handle_is_invalid(pkt->u.handle) ||
1288*4882a593Smuzhiyun 		    !vmci_handle_is_equal(pkt->u.handle,
1289*4882a593Smuzhiyun 					  vmci_trans(vsk)->qp_handle)) {
1290*4882a593Smuzhiyun 			skerr = EPROTO;
1291*4882a593Smuzhiyun 			err = -EINVAL;
1292*4882a593Smuzhiyun 			goto destroy;
1293*4882a593Smuzhiyun 		}
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 		/* Signify the socket is connected and wakeup the waiter in
1296*4882a593Smuzhiyun 		 * connect(). Also place the socket in the connected table for
1297*4882a593Smuzhiyun 		 * accounting (it can already be found since it's in the bound
1298*4882a593Smuzhiyun 		 * table).
1299*4882a593Smuzhiyun 		 */
1300*4882a593Smuzhiyun 		sk->sk_state = TCP_ESTABLISHED;
1301*4882a593Smuzhiyun 		sk->sk_socket->state = SS_CONNECTED;
1302*4882a593Smuzhiyun 		vsock_insert_connected(vsk);
1303*4882a593Smuzhiyun 		sk->sk_state_change(sk);
1304*4882a593Smuzhiyun 
1305*4882a593Smuzhiyun 		break;
1306*4882a593Smuzhiyun 	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE:
1307*4882a593Smuzhiyun 	case VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2:
1308*4882a593Smuzhiyun 		if (pkt->u.size == 0
1309*4882a593Smuzhiyun 		    || pkt->dg.src.context != vsk->remote_addr.svm_cid
1310*4882a593Smuzhiyun 		    || pkt->src_port != vsk->remote_addr.svm_port
1311*4882a593Smuzhiyun 		    || !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle)
1312*4882a593Smuzhiyun 		    || vmci_trans(vsk)->qpair
1313*4882a593Smuzhiyun 		    || vmci_trans(vsk)->produce_size != 0
1314*4882a593Smuzhiyun 		    || vmci_trans(vsk)->consume_size != 0
1315*4882a593Smuzhiyun 		    || vmci_trans(vsk)->detach_sub_id != VMCI_INVALID_ID) {
1316*4882a593Smuzhiyun 			skerr = EPROTO;
1317*4882a593Smuzhiyun 			err = -EINVAL;
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun 			goto destroy;
1320*4882a593Smuzhiyun 		}
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 		err = vmci_transport_recv_connecting_client_negotiate(sk, pkt);
1323*4882a593Smuzhiyun 		if (err) {
1324*4882a593Smuzhiyun 			skerr = -err;
1325*4882a593Smuzhiyun 			goto destroy;
1326*4882a593Smuzhiyun 		}
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 		break;
1329*4882a593Smuzhiyun 	case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
1330*4882a593Smuzhiyun 		err = vmci_transport_recv_connecting_client_invalid(sk, pkt);
1331*4882a593Smuzhiyun 		if (err) {
1332*4882a593Smuzhiyun 			skerr = -err;
1333*4882a593Smuzhiyun 			goto destroy;
1334*4882a593Smuzhiyun 		}
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun 		break;
1337*4882a593Smuzhiyun 	case VMCI_TRANSPORT_PACKET_TYPE_RST:
1338*4882a593Smuzhiyun 		/* Older versions of the Linux code (WS 6.5 / ESX 4.0) used to
1339*4882a593Smuzhiyun 		 * continue processing here after they sent an INVALID packet.
1340*4882a593Smuzhiyun 		 * This meant that we got a RST after the INVALID. We ignore a
1341*4882a593Smuzhiyun 		 * RST after an INVALID. The common code doesn't send the RST
1342*4882a593Smuzhiyun 		 * ... so we can hang if an old version of the common code
1343*4882a593Smuzhiyun 		 * fails between getting a REQUEST and sending an OFFER back.
1344*4882a593Smuzhiyun 		 * Not much we can do about it... except hope that it doesn't
1345*4882a593Smuzhiyun 		 * happen.
1346*4882a593Smuzhiyun 		 */
1347*4882a593Smuzhiyun 		if (vsk->ignore_connecting_rst) {
1348*4882a593Smuzhiyun 			vsk->ignore_connecting_rst = false;
1349*4882a593Smuzhiyun 		} else {
1350*4882a593Smuzhiyun 			skerr = ECONNRESET;
1351*4882a593Smuzhiyun 			err = 0;
1352*4882a593Smuzhiyun 			goto destroy;
1353*4882a593Smuzhiyun 		}
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 		break;
1356*4882a593Smuzhiyun 	default:
1357*4882a593Smuzhiyun 		/* Close and clean up the connection. */
1358*4882a593Smuzhiyun 		skerr = EPROTO;
1359*4882a593Smuzhiyun 		err = -EINVAL;
1360*4882a593Smuzhiyun 		goto destroy;
1361*4882a593Smuzhiyun 	}
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 	return 0;
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun destroy:
1366*4882a593Smuzhiyun 	vmci_transport_send_reset(sk, pkt);
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 	sk->sk_state = TCP_CLOSE;
1369*4882a593Smuzhiyun 	sk->sk_err = skerr;
1370*4882a593Smuzhiyun 	sk->sk_error_report(sk);
1371*4882a593Smuzhiyun 	return err;
1372*4882a593Smuzhiyun }
1373*4882a593Smuzhiyun 
1374*4882a593Smuzhiyun static int vmci_transport_recv_connecting_client_negotiate(
1375*4882a593Smuzhiyun 					struct sock *sk,
1376*4882a593Smuzhiyun 					struct vmci_transport_packet *pkt)
1377*4882a593Smuzhiyun {
1378*4882a593Smuzhiyun 	int err;
1379*4882a593Smuzhiyun 	struct vsock_sock *vsk;
1380*4882a593Smuzhiyun 	struct vmci_handle handle;
1381*4882a593Smuzhiyun 	struct vmci_qp *qpair;
1382*4882a593Smuzhiyun 	u32 detach_sub_id;
1383*4882a593Smuzhiyun 	bool is_local;
1384*4882a593Smuzhiyun 	u32 flags;
1385*4882a593Smuzhiyun 	bool old_proto = true;
1386*4882a593Smuzhiyun 	bool old_pkt_proto;
1387*4882a593Smuzhiyun 	u16 version;
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun 	vsk = vsock_sk(sk);
1390*4882a593Smuzhiyun 	handle = VMCI_INVALID_HANDLE;
1391*4882a593Smuzhiyun 	detach_sub_id = VMCI_INVALID_ID;
1392*4882a593Smuzhiyun 
1393*4882a593Smuzhiyun 	/* If we have gotten here then we should be past the point where old
1394*4882a593Smuzhiyun 	 * Linux vsock could have sent the bogus RST.
1395*4882a593Smuzhiyun 	 */
1396*4882a593Smuzhiyun 	vsk->sent_request = false;
1397*4882a593Smuzhiyun 	vsk->ignore_connecting_rst = false;
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun 	/* Verify that we're OK with the proposed queue pair size */
1400*4882a593Smuzhiyun 	if (pkt->u.size < vsk->buffer_min_size ||
1401*4882a593Smuzhiyun 	    pkt->u.size > vsk->buffer_max_size) {
1402*4882a593Smuzhiyun 		err = -EINVAL;
1403*4882a593Smuzhiyun 		goto destroy;
1404*4882a593Smuzhiyun 	}
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	/* At this point we know the CID the peer is using to talk to us. */
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	if (vsk->local_addr.svm_cid == VMADDR_CID_ANY)
1409*4882a593Smuzhiyun 		vsk->local_addr.svm_cid = pkt->dg.dst.context;
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun 	/* Setup the notify ops to be the highest supported version that both
1412*4882a593Smuzhiyun 	 * the server and the client support.
1413*4882a593Smuzhiyun 	 */
1414*4882a593Smuzhiyun 
1415*4882a593Smuzhiyun 	if (vmci_transport_old_proto_override(&old_pkt_proto)) {
1416*4882a593Smuzhiyun 		old_proto = old_pkt_proto;
1417*4882a593Smuzhiyun 	} else {
1418*4882a593Smuzhiyun 		if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE)
1419*4882a593Smuzhiyun 			old_proto = true;
1420*4882a593Smuzhiyun 		else if (pkt->type == VMCI_TRANSPORT_PACKET_TYPE_NEGOTIATE2)
1421*4882a593Smuzhiyun 			old_proto = false;
1422*4882a593Smuzhiyun 
1423*4882a593Smuzhiyun 	}
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 	if (old_proto)
1426*4882a593Smuzhiyun 		version = VSOCK_PROTO_INVALID;
1427*4882a593Smuzhiyun 	else
1428*4882a593Smuzhiyun 		version = pkt->proto;
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun 	if (!vmci_transport_proto_to_notify_struct(sk, &version, old_proto)) {
1431*4882a593Smuzhiyun 		err = -EINVAL;
1432*4882a593Smuzhiyun 		goto destroy;
1433*4882a593Smuzhiyun 	}
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	/* Subscribe to detach events first.
1436*4882a593Smuzhiyun 	 *
1437*4882a593Smuzhiyun 	 * XXX We attach once for each queue pair created for now so it is easy
1438*4882a593Smuzhiyun 	 * to find the socket (it's provided), but later we should only
1439*4882a593Smuzhiyun 	 * subscribe once and add a way to lookup sockets by queue pair handle.
1440*4882a593Smuzhiyun 	 */
1441*4882a593Smuzhiyun 	err = vmci_event_subscribe(VMCI_EVENT_QP_PEER_DETACH,
1442*4882a593Smuzhiyun 				   vmci_transport_peer_detach_cb,
1443*4882a593Smuzhiyun 				   vmci_trans(vsk), &detach_sub_id);
1444*4882a593Smuzhiyun 	if (err < VMCI_SUCCESS) {
1445*4882a593Smuzhiyun 		err = vmci_transport_error_to_vsock_error(err);
1446*4882a593Smuzhiyun 		goto destroy;
1447*4882a593Smuzhiyun 	}
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	/* Make VMCI select the handle for us. */
1450*4882a593Smuzhiyun 	handle = VMCI_INVALID_HANDLE;
1451*4882a593Smuzhiyun 	is_local = vsk->remote_addr.svm_cid == vsk->local_addr.svm_cid;
1452*4882a593Smuzhiyun 	flags = is_local ? VMCI_QPFLAG_LOCAL : 0;
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	err = vmci_transport_queue_pair_alloc(&qpair,
1455*4882a593Smuzhiyun 					      &handle,
1456*4882a593Smuzhiyun 					      pkt->u.size,
1457*4882a593Smuzhiyun 					      pkt->u.size,
1458*4882a593Smuzhiyun 					      vsk->remote_addr.svm_cid,
1459*4882a593Smuzhiyun 					      flags,
1460*4882a593Smuzhiyun 					      vmci_transport_is_trusted(
1461*4882a593Smuzhiyun 						  vsk,
1462*4882a593Smuzhiyun 						  vsk->remote_addr.svm_cid));
1464*4882a593Smuzhiyun 	if (err < 0)
1465*4882a593Smuzhiyun 		goto destroy;
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun 	err = vmci_transport_send_qp_offer(sk, handle);
1468*4882a593Smuzhiyun 	if (err < 0) {
1469*4882a593Smuzhiyun 		err = vmci_transport_error_to_vsock_error(err);
1470*4882a593Smuzhiyun 		goto destroy;
1471*4882a593Smuzhiyun 	}
1472*4882a593Smuzhiyun 
1473*4882a593Smuzhiyun 	vmci_trans(vsk)->qp_handle = handle;
1474*4882a593Smuzhiyun 	vmci_trans(vsk)->qpair = qpair;
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size =
1477*4882a593Smuzhiyun 		pkt->u.size;
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun 	vmci_trans(vsk)->detach_sub_id = detach_sub_id;
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 	vmci_trans(vsk)->notify_ops->process_negotiate(sk);
1482*4882a593Smuzhiyun 
1483*4882a593Smuzhiyun 	return 0;
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun destroy:
1486*4882a593Smuzhiyun 	if (detach_sub_id != VMCI_INVALID_ID)
1487*4882a593Smuzhiyun 		vmci_event_unsubscribe(detach_sub_id);
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun 	if (!vmci_handle_is_invalid(handle))
1490*4882a593Smuzhiyun 		vmci_qpair_detach(&qpair);
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 	return err;
1493*4882a593Smuzhiyun }
1494*4882a593Smuzhiyun 
1495*4882a593Smuzhiyun static int
1496*4882a593Smuzhiyun vmci_transport_recv_connecting_client_invalid(struct sock *sk,
1497*4882a593Smuzhiyun 					      struct vmci_transport_packet *pkt)
1498*4882a593Smuzhiyun {
1499*4882a593Smuzhiyun 	int err = 0;
1500*4882a593Smuzhiyun 	struct vsock_sock *vsk = vsock_sk(sk);
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun 	if (vsk->sent_request) {
1503*4882a593Smuzhiyun 		vsk->sent_request = false;
1504*4882a593Smuzhiyun 		vsk->ignore_connecting_rst = true;
1505*4882a593Smuzhiyun 
1506*4882a593Smuzhiyun 		err = vmci_transport_send_conn_request(sk, vsk->buffer_size);
1507*4882a593Smuzhiyun 		if (err < 0)
1508*4882a593Smuzhiyun 			err = vmci_transport_error_to_vsock_error(err);
1509*4882a593Smuzhiyun 		else
1510*4882a593Smuzhiyun 			err = 0;
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun 	}
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	return err;
1515*4882a593Smuzhiyun }
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun static int vmci_transport_recv_connected(struct sock *sk,
1518*4882a593Smuzhiyun 					 struct vmci_transport_packet *pkt)
1519*4882a593Smuzhiyun {
1520*4882a593Smuzhiyun 	struct vsock_sock *vsk;
1521*4882a593Smuzhiyun 	bool pkt_processed = false;
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun 	/* In cases where we are closing the connection, it's sufficient to
1524*4882a593Smuzhiyun 	 * mark the state change (and maybe error) and wake up any waiting
1525*4882a593Smuzhiyun 	 * threads. Since this is a connected socket, it's owned by a user
1526*4882a593Smuzhiyun 	 * process and will be cleaned up when the failure is passed back on
1527*4882a593Smuzhiyun 	 * the current or next system call.  Our system call implementations
1528*4882a593Smuzhiyun 	 * must therefore check for error and state changes on entry and when
1529*4882a593Smuzhiyun 	 * being awoken.
1530*4882a593Smuzhiyun 	 */
1531*4882a593Smuzhiyun 	switch (pkt->type) {
1532*4882a593Smuzhiyun 	case VMCI_TRANSPORT_PACKET_TYPE_SHUTDOWN:
1533*4882a593Smuzhiyun 		if (pkt->u.mode) {
1534*4882a593Smuzhiyun 			vsk = vsock_sk(sk);
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 			vsk->peer_shutdown |= pkt->u.mode;
1537*4882a593Smuzhiyun 			sk->sk_state_change(sk);
1538*4882a593Smuzhiyun 		}
1539*4882a593Smuzhiyun 		break;
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	case VMCI_TRANSPORT_PACKET_TYPE_RST:
1542*4882a593Smuzhiyun 		vsk = vsock_sk(sk);
1543*4882a593Smuzhiyun 		/* It is possible that we sent our peer a message (e.g. a
1544*4882a593Smuzhiyun 		 * WAITING_READ) right before we got notified that the peer had
1545*4882a593Smuzhiyun 		 * detached. If that happens then we can get a RST pkt back
1546*4882a593Smuzhiyun 		 * from our peer even though there is data available for us to
1547*4882a593Smuzhiyun 		 * read. In that case, don't shutdown the socket completely but
1548*4882a593Smuzhiyun 		 * instead allow the local client to finish reading data off
1549*4882a593Smuzhiyun 		 * the queuepair. Always treat a RST pkt in connected mode like
1550*4882a593Smuzhiyun 		 * a clean shutdown.
1551*4882a593Smuzhiyun 		 */
1552*4882a593Smuzhiyun 		sock_set_flag(sk, SOCK_DONE);
1553*4882a593Smuzhiyun 		vsk->peer_shutdown = SHUTDOWN_MASK;
1554*4882a593Smuzhiyun 		if (vsock_stream_has_data(vsk) <= 0)
1555*4882a593Smuzhiyun 			sk->sk_state = TCP_CLOSING;
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 		sk->sk_state_change(sk);
1558*4882a593Smuzhiyun 		break;
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 	default:
1561*4882a593Smuzhiyun 		vsk = vsock_sk(sk);
1562*4882a593Smuzhiyun 		vmci_trans(vsk)->notify_ops->handle_notify_pkt(
1563*4882a593Smuzhiyun 				sk, pkt, false, NULL, NULL,
1564*4882a593Smuzhiyun 				&pkt_processed);
1565*4882a593Smuzhiyun 		if (!pkt_processed)
1566*4882a593Smuzhiyun 			return -EINVAL;
1567*4882a593Smuzhiyun 
1568*4882a593Smuzhiyun 		break;
1569*4882a593Smuzhiyun 	}
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 	return 0;
1572*4882a593Smuzhiyun }
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun static int vmci_transport_socket_init(struct vsock_sock *vsk,
1575*4882a593Smuzhiyun 				      struct vsock_sock *psk)
1576*4882a593Smuzhiyun {
1577*4882a593Smuzhiyun 	vsk->trans = kmalloc(sizeof(struct vmci_transport), GFP_KERNEL);
1578*4882a593Smuzhiyun 	if (!vsk->trans)
1579*4882a593Smuzhiyun 		return -ENOMEM;
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 	vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
1582*4882a593Smuzhiyun 	vmci_trans(vsk)->qp_handle = VMCI_INVALID_HANDLE;
1583*4882a593Smuzhiyun 	vmci_trans(vsk)->qpair = NULL;
1584*4882a593Smuzhiyun 	vmci_trans(vsk)->produce_size = vmci_trans(vsk)->consume_size = 0;
1585*4882a593Smuzhiyun 	vmci_trans(vsk)->detach_sub_id = VMCI_INVALID_ID;
1586*4882a593Smuzhiyun 	vmci_trans(vsk)->notify_ops = NULL;
1587*4882a593Smuzhiyun 	INIT_LIST_HEAD(&vmci_trans(vsk)->elem);
1588*4882a593Smuzhiyun 	vmci_trans(vsk)->sk = &vsk->sk;
1589*4882a593Smuzhiyun 	spin_lock_init(&vmci_trans(vsk)->lock);
1590*4882a593Smuzhiyun 
1591*4882a593Smuzhiyun 	return 0;
1592*4882a593Smuzhiyun }
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun static void vmci_transport_free_resources(struct list_head *transport_list)
1595*4882a593Smuzhiyun {
1596*4882a593Smuzhiyun 	while (!list_empty(transport_list)) {
1597*4882a593Smuzhiyun 		struct vmci_transport *transport =
1598*4882a593Smuzhiyun 		    list_first_entry(transport_list, struct vmci_transport,
1599*4882a593Smuzhiyun 				     elem);
1600*4882a593Smuzhiyun 		list_del(&transport->elem);
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun 		if (transport->detach_sub_id != VMCI_INVALID_ID) {
1603*4882a593Smuzhiyun 			vmci_event_unsubscribe(transport->detach_sub_id);
1604*4882a593Smuzhiyun 			transport->detach_sub_id = VMCI_INVALID_ID;
1605*4882a593Smuzhiyun 		}
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 		if (!vmci_handle_is_invalid(transport->qp_handle)) {
1608*4882a593Smuzhiyun 			vmci_qpair_detach(&transport->qpair);
1609*4882a593Smuzhiyun 			transport->qp_handle = VMCI_INVALID_HANDLE;
1610*4882a593Smuzhiyun 			transport->produce_size = 0;
1611*4882a593Smuzhiyun 			transport->consume_size = 0;
1612*4882a593Smuzhiyun 		}
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun 		kfree(transport);
1615*4882a593Smuzhiyun 	}
1616*4882a593Smuzhiyun }
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun static void vmci_transport_cleanup(struct work_struct *work)
1619*4882a593Smuzhiyun {
1620*4882a593Smuzhiyun 	LIST_HEAD(pending);
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun 	spin_lock_bh(&vmci_transport_cleanup_lock);
1623*4882a593Smuzhiyun 	list_replace_init(&vmci_transport_cleanup_list, &pending);
1624*4882a593Smuzhiyun 	spin_unlock_bh(&vmci_transport_cleanup_lock);
1625*4882a593Smuzhiyun 	vmci_transport_free_resources(&pending);
1626*4882a593Smuzhiyun }
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun static void vmci_transport_destruct(struct vsock_sock *vsk)
1629*4882a593Smuzhiyun {
1630*4882a593Smuzhiyun 	/* transport can be NULL if we hit a failure at init() time */
1631*4882a593Smuzhiyun 	if (!vmci_trans(vsk))
1632*4882a593Smuzhiyun 		return;
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun 	/* Ensure that the detach callback doesn't use the sk/vsk
1635*4882a593Smuzhiyun 	 * we are about to destruct.
1636*4882a593Smuzhiyun 	 */
1637*4882a593Smuzhiyun 	spin_lock_bh(&vmci_trans(vsk)->lock);
1638*4882a593Smuzhiyun 	vmci_trans(vsk)->sk = NULL;
1639*4882a593Smuzhiyun 	spin_unlock_bh(&vmci_trans(vsk)->lock);
1640*4882a593Smuzhiyun 
1641*4882a593Smuzhiyun 	if (vmci_trans(vsk)->notify_ops)
1642*4882a593Smuzhiyun 		vmci_trans(vsk)->notify_ops->socket_destruct(vsk);
1643*4882a593Smuzhiyun 
1644*4882a593Smuzhiyun 	spin_lock_bh(&vmci_transport_cleanup_lock);
1645*4882a593Smuzhiyun 	list_add(&vmci_trans(vsk)->elem, &vmci_transport_cleanup_list);
1646*4882a593Smuzhiyun 	spin_unlock_bh(&vmci_transport_cleanup_lock);
1647*4882a593Smuzhiyun 	schedule_work(&vmci_transport_cleanup_work);
1648*4882a593Smuzhiyun 
1649*4882a593Smuzhiyun 	vsk->trans = NULL;
1650*4882a593Smuzhiyun }
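/* Freeing of VMCI resources (queue pair detach, event unsubscribe) is
 * deferred to vmci_transport_cleanup() above rather than done inline,
 * presumably because destruct can run in a context where those blocking
 * VMCI calls are not safe; only the spinlock-protected list is touched
 * here.
 */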
1651*4882a593Smuzhiyun 
1652*4882a593Smuzhiyun static void vmci_transport_release(struct vsock_sock *vsk)
1653*4882a593Smuzhiyun {
1654*4882a593Smuzhiyun 	vsock_remove_sock(vsk);
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 	if (!vmci_handle_is_invalid(vmci_trans(vsk)->dg_handle)) {
1657*4882a593Smuzhiyun 		vmci_datagram_destroy_handle(vmci_trans(vsk)->dg_handle);
1658*4882a593Smuzhiyun 		vmci_trans(vsk)->dg_handle = VMCI_INVALID_HANDLE;
1659*4882a593Smuzhiyun 	}
1660*4882a593Smuzhiyun }
1661*4882a593Smuzhiyun 
1662*4882a593Smuzhiyun static int vmci_transport_dgram_bind(struct vsock_sock *vsk,
1663*4882a593Smuzhiyun 				     struct sockaddr_vm *addr)
1664*4882a593Smuzhiyun {
1665*4882a593Smuzhiyun 	u32 port;
1666*4882a593Smuzhiyun 	u32 flags;
1667*4882a593Smuzhiyun 	int err;
1668*4882a593Smuzhiyun 
1669*4882a593Smuzhiyun 	/* VMCI will select a resource ID for us if we provide
1670*4882a593Smuzhiyun 	 * VMCI_INVALID_ID.
1671*4882a593Smuzhiyun 	 */
1672*4882a593Smuzhiyun 	port = addr->svm_port == VMADDR_PORT_ANY ?
1673*4882a593Smuzhiyun 			VMCI_INVALID_ID : addr->svm_port;
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun 	if (port <= LAST_RESERVED_PORT && !capable(CAP_NET_BIND_SERVICE))
1676*4882a593Smuzhiyun 		return -EACCES;
1677*4882a593Smuzhiyun 
1678*4882a593Smuzhiyun 	flags = addr->svm_cid == VMADDR_CID_ANY ?
1679*4882a593Smuzhiyun 				VMCI_FLAG_ANYCID_DG_HND : 0;
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun 	err = vmci_transport_datagram_create_hnd(port, flags,
1682*4882a593Smuzhiyun 						 vmci_transport_recv_dgram_cb,
1683*4882a593Smuzhiyun 						 &vsk->sk,
1684*4882a593Smuzhiyun 						 &vmci_trans(vsk)->dg_handle);
1685*4882a593Smuzhiyun 	if (err < VMCI_SUCCESS)
1686*4882a593Smuzhiyun 		return vmci_transport_error_to_vsock_error(err);
1687*4882a593Smuzhiyun 	vsock_addr_init(&vsk->local_addr, addr->svm_cid,
1688*4882a593Smuzhiyun 			vmci_trans(vsk)->dg_handle.resource);
1689*4882a593Smuzhiyun 
1690*4882a593Smuzhiyun 	return 0;
1691*4882a593Smuzhiyun }
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun static int vmci_transport_dgram_enqueue(
1694*4882a593Smuzhiyun 	struct vsock_sock *vsk,
1695*4882a593Smuzhiyun 	struct sockaddr_vm *remote_addr,
1696*4882a593Smuzhiyun 	struct msghdr *msg,
1697*4882a593Smuzhiyun 	size_t len)
1698*4882a593Smuzhiyun {
1699*4882a593Smuzhiyun 	int err;
1700*4882a593Smuzhiyun 	struct vmci_datagram *dg;
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun 	if (len > VMCI_MAX_DG_PAYLOAD_SIZE)
1703*4882a593Smuzhiyun 		return -EMSGSIZE;
1704*4882a593Smuzhiyun 
1705*4882a593Smuzhiyun 	if (!vmci_transport_allow_dgram(vsk, remote_addr->svm_cid))
1706*4882a593Smuzhiyun 		return -EPERM;
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun 	/* Allocate a buffer for the user's message and our packet header. */
1709*4882a593Smuzhiyun 	dg = kmalloc(len + sizeof(*dg), GFP_KERNEL);
1710*4882a593Smuzhiyun 	if (!dg)
1711*4882a593Smuzhiyun 		return -ENOMEM;
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun 	err = memcpy_from_msg(VMCI_DG_PAYLOAD(dg), msg, len);
	if (err) {
		/* Don't send uninitialized kernel memory if the copy from
		 * userspace fails.
		 */
		kfree(dg);
		return err;
	}
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 	dg->dst = vmci_make_handle(remote_addr->svm_cid,
1716*4882a593Smuzhiyun 				   remote_addr->svm_port);
1717*4882a593Smuzhiyun 	dg->src = vmci_make_handle(vsk->local_addr.svm_cid,
1718*4882a593Smuzhiyun 				   vsk->local_addr.svm_port);
1719*4882a593Smuzhiyun 	dg->payload_size = len;
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun 	err = vmci_datagram_send(dg);
1722*4882a593Smuzhiyun 	kfree(dg);
1723*4882a593Smuzhiyun 	if (err < 0)
1724*4882a593Smuzhiyun 		return vmci_transport_error_to_vsock_error(err);
1725*4882a593Smuzhiyun 
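	/* vmci_datagram_send() is expected to return the full datagram size
	 * (header plus payload) on success, so strip the header to report
	 * the number of payload bytes sent.
	 */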
1726*4882a593Smuzhiyun 	return err - sizeof(*dg);
1727*4882a593Smuzhiyun }
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun static int vmci_transport_dgram_dequeue(struct vsock_sock *vsk,
1730*4882a593Smuzhiyun 					struct msghdr *msg, size_t len,
1731*4882a593Smuzhiyun 					int flags)
1732*4882a593Smuzhiyun {
1733*4882a593Smuzhiyun 	int err;
1734*4882a593Smuzhiyun 	int noblock;
1735*4882a593Smuzhiyun 	struct vmci_datagram *dg;
1736*4882a593Smuzhiyun 	size_t payload_len;
1737*4882a593Smuzhiyun 	struct sk_buff *skb;
1738*4882a593Smuzhiyun 
1739*4882a593Smuzhiyun 	noblock = flags & MSG_DONTWAIT;
1740*4882a593Smuzhiyun 
1741*4882a593Smuzhiyun 	if (flags & MSG_OOB || flags & MSG_ERRQUEUE)
1742*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1743*4882a593Smuzhiyun 
1744*4882a593Smuzhiyun 	/* Retrieve the head sk_buff from the socket's receive queue. */
1745*4882a593Smuzhiyun 	err = 0;
1746*4882a593Smuzhiyun 	skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err);
1747*4882a593Smuzhiyun 	if (!skb)
1748*4882a593Smuzhiyun 		return err;
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun 	dg = (struct vmci_datagram *)skb->data;
1751*4882a593Smuzhiyun 	if (!dg)
1752*4882a593Smuzhiyun 		/* err is 0, meaning we read zero bytes. */
1753*4882a593Smuzhiyun 		goto out;
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun 	payload_len = dg->payload_size;
1756*4882a593Smuzhiyun 	/* Ensure the sk_buff matches the payload size claimed in the packet. */
1757*4882a593Smuzhiyun 	if (payload_len != skb->len - sizeof(*dg)) {
1758*4882a593Smuzhiyun 		err = -EINVAL;
1759*4882a593Smuzhiyun 		goto out;
1760*4882a593Smuzhiyun 	}
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 	if (payload_len > len) {
1763*4882a593Smuzhiyun 		payload_len = len;
1764*4882a593Smuzhiyun 		msg->msg_flags |= MSG_TRUNC;
1765*4882a593Smuzhiyun 	}
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun 	/* Place the datagram payload in the user's iovec. */
1768*4882a593Smuzhiyun 	err = skb_copy_datagram_msg(skb, sizeof(*dg), msg, payload_len);
1769*4882a593Smuzhiyun 	if (err)
1770*4882a593Smuzhiyun 		goto out;
1771*4882a593Smuzhiyun 
1772*4882a593Smuzhiyun 	if (msg->msg_name) {
1773*4882a593Smuzhiyun 		/* Provide the address of the sender. */
1774*4882a593Smuzhiyun 		DECLARE_SOCKADDR(struct sockaddr_vm *, vm_addr, msg->msg_name);
1775*4882a593Smuzhiyun 		vsock_addr_init(vm_addr, dg->src.context, dg->src.resource);
1776*4882a593Smuzhiyun 		msg->msg_namelen = sizeof(*vm_addr);
1777*4882a593Smuzhiyun 	}
1778*4882a593Smuzhiyun 	err = payload_len;
1779*4882a593Smuzhiyun 
1780*4882a593Smuzhiyun out:
1781*4882a593Smuzhiyun 	skb_free_datagram(&vsk->sk, skb);
1782*4882a593Smuzhiyun 	return err;
1783*4882a593Smuzhiyun }
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun static bool vmci_transport_dgram_allow(u32 cid, u32 port)
1786*4882a593Smuzhiyun {
1787*4882a593Smuzhiyun 	if (cid == VMADDR_CID_HYPERVISOR) {
1788*4882a593Smuzhiyun 		/* Registrations of PBRPC Servers do not modify VMX/Hypervisor
1789*4882a593Smuzhiyun 		 * state and are allowed.
1790*4882a593Smuzhiyun 		 */
1791*4882a593Smuzhiyun 		return port == VMCI_UNITY_PBRPC_REGISTER;
1792*4882a593Smuzhiyun 	}
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun 	return true;
1795*4882a593Smuzhiyun }
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun static int vmci_transport_connect(struct vsock_sock *vsk)
1798*4882a593Smuzhiyun {
1799*4882a593Smuzhiyun 	int err;
1800*4882a593Smuzhiyun 	bool old_pkt_proto = false;
1801*4882a593Smuzhiyun 	struct sock *sk = &vsk->sk;
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun 	if (vmci_transport_old_proto_override(&old_pkt_proto) &&
1804*4882a593Smuzhiyun 		old_pkt_proto) {
1805*4882a593Smuzhiyun 		err = vmci_transport_send_conn_request(sk, vsk->buffer_size);
1806*4882a593Smuzhiyun 		if (err < 0) {
1807*4882a593Smuzhiyun 			sk->sk_state = TCP_CLOSE;
1808*4882a593Smuzhiyun 			return err;
1809*4882a593Smuzhiyun 		}
1810*4882a593Smuzhiyun 	} else {
1811*4882a593Smuzhiyun 		int supported_proto_versions =
1812*4882a593Smuzhiyun 			vmci_transport_new_proto_supported_versions();
1813*4882a593Smuzhiyun 		err = vmci_transport_send_conn_request2(sk, vsk->buffer_size,
1814*4882a593Smuzhiyun 				supported_proto_versions);
1815*4882a593Smuzhiyun 		if (err < 0) {
1816*4882a593Smuzhiyun 			sk->sk_state = TCP_CLOSE;
1817*4882a593Smuzhiyun 			return err;
1818*4882a593Smuzhiyun 		}
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 		vsk->sent_request = true;
1821*4882a593Smuzhiyun 	}
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun 	return err;
1824*4882a593Smuzhiyun }
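/* With the new-style request, sent_request is left set so that an INVALID
 * reply from an old peer can be handled by
 * vmci_transport_recv_connecting_client_invalid() above, which falls back
 * to resending an old-style conn request.
 */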
1825*4882a593Smuzhiyun 
1826*4882a593Smuzhiyun static ssize_t vmci_transport_stream_dequeue(
1827*4882a593Smuzhiyun 	struct vsock_sock *vsk,
1828*4882a593Smuzhiyun 	struct msghdr *msg,
1829*4882a593Smuzhiyun 	size_t len,
1830*4882a593Smuzhiyun 	int flags)
1831*4882a593Smuzhiyun {
1832*4882a593Smuzhiyun 	if (flags & MSG_PEEK)
1833*4882a593Smuzhiyun 		return vmci_qpair_peekv(vmci_trans(vsk)->qpair, msg, len, 0);
1834*4882a593Smuzhiyun 	else
1835*4882a593Smuzhiyun 		return vmci_qpair_dequev(vmci_trans(vsk)->qpair, msg, len, 0);
1836*4882a593Smuzhiyun }
1837*4882a593Smuzhiyun 
1838*4882a593Smuzhiyun static ssize_t vmci_transport_stream_enqueue(
1839*4882a593Smuzhiyun 	struct vsock_sock *vsk,
1840*4882a593Smuzhiyun 	struct msghdr *msg,
1841*4882a593Smuzhiyun 	size_t len)
1842*4882a593Smuzhiyun {
1843*4882a593Smuzhiyun 	return vmci_qpair_enquev(vmci_trans(vsk)->qpair, msg, len, 0);
1844*4882a593Smuzhiyun }
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun static s64 vmci_transport_stream_has_data(struct vsock_sock *vsk)
1847*4882a593Smuzhiyun {
1848*4882a593Smuzhiyun 	return vmci_qpair_consume_buf_ready(vmci_trans(vsk)->qpair);
1849*4882a593Smuzhiyun }
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun static s64 vmci_transport_stream_has_space(struct vsock_sock *vsk)
1852*4882a593Smuzhiyun {
1853*4882a593Smuzhiyun 	return vmci_qpair_produce_free_space(vmci_trans(vsk)->qpair);
1854*4882a593Smuzhiyun }
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun static u64 vmci_transport_stream_rcvhiwat(struct vsock_sock *vsk)
1857*4882a593Smuzhiyun {
1858*4882a593Smuzhiyun 	return vmci_trans(vsk)->consume_size;
1859*4882a593Smuzhiyun }
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun static bool vmci_transport_stream_is_active(struct vsock_sock *vsk)
1862*4882a593Smuzhiyun {
1863*4882a593Smuzhiyun 	return !vmci_handle_is_invalid(vmci_trans(vsk)->qp_handle);
1864*4882a593Smuzhiyun }
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun static int vmci_transport_notify_poll_in(
1867*4882a593Smuzhiyun 	struct vsock_sock *vsk,
1868*4882a593Smuzhiyun 	size_t target,
1869*4882a593Smuzhiyun 	bool *data_ready_now)
1870*4882a593Smuzhiyun {
1871*4882a593Smuzhiyun 	return vmci_trans(vsk)->notify_ops->poll_in(
1872*4882a593Smuzhiyun 			&vsk->sk, target, data_ready_now);
1873*4882a593Smuzhiyun }
1874*4882a593Smuzhiyun 
1875*4882a593Smuzhiyun static int vmci_transport_notify_poll_out(
1876*4882a593Smuzhiyun 	struct vsock_sock *vsk,
1877*4882a593Smuzhiyun 	size_t target,
1878*4882a593Smuzhiyun 	bool *space_available_now)
1879*4882a593Smuzhiyun {
1880*4882a593Smuzhiyun 	return vmci_trans(vsk)->notify_ops->poll_out(
1881*4882a593Smuzhiyun 			&vsk->sk, target, space_available_now);
1882*4882a593Smuzhiyun }
1883*4882a593Smuzhiyun 
1884*4882a593Smuzhiyun static int vmci_transport_notify_recv_init(
1885*4882a593Smuzhiyun 	struct vsock_sock *vsk,
1886*4882a593Smuzhiyun 	size_t target,
1887*4882a593Smuzhiyun 	struct vsock_transport_recv_notify_data *data)
1888*4882a593Smuzhiyun {
1889*4882a593Smuzhiyun 	return vmci_trans(vsk)->notify_ops->recv_init(
1890*4882a593Smuzhiyun 			&vsk->sk, target,
1891*4882a593Smuzhiyun 			(struct vmci_transport_recv_notify_data *)data);
1892*4882a593Smuzhiyun }
1893*4882a593Smuzhiyun 
1894*4882a593Smuzhiyun static int vmci_transport_notify_recv_pre_block(
1895*4882a593Smuzhiyun 	struct vsock_sock *vsk,
1896*4882a593Smuzhiyun 	size_t target,
1897*4882a593Smuzhiyun 	struct vsock_transport_recv_notify_data *data)
1898*4882a593Smuzhiyun {
1899*4882a593Smuzhiyun 	return vmci_trans(vsk)->notify_ops->recv_pre_block(
1900*4882a593Smuzhiyun 			&vsk->sk, target,
1901*4882a593Smuzhiyun 			(struct vmci_transport_recv_notify_data *)data);
1902*4882a593Smuzhiyun }
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun static int vmci_transport_notify_recv_pre_dequeue(
1905*4882a593Smuzhiyun 	struct vsock_sock *vsk,
1906*4882a593Smuzhiyun 	size_t target,
1907*4882a593Smuzhiyun 	struct vsock_transport_recv_notify_data *data)
1908*4882a593Smuzhiyun {
1909*4882a593Smuzhiyun 	return vmci_trans(vsk)->notify_ops->recv_pre_dequeue(
1910*4882a593Smuzhiyun 			&vsk->sk, target,
1911*4882a593Smuzhiyun 			(struct vmci_transport_recv_notify_data *)data);
1912*4882a593Smuzhiyun }
1913*4882a593Smuzhiyun 
1914*4882a593Smuzhiyun static int vmci_transport_notify_recv_post_dequeue(
1915*4882a593Smuzhiyun 	struct vsock_sock *vsk,
1916*4882a593Smuzhiyun 	size_t target,
1917*4882a593Smuzhiyun 	ssize_t copied,
1918*4882a593Smuzhiyun 	bool data_read,
1919*4882a593Smuzhiyun 	struct vsock_transport_recv_notify_data *data)
1920*4882a593Smuzhiyun {
1921*4882a593Smuzhiyun 	return vmci_trans(vsk)->notify_ops->recv_post_dequeue(
1922*4882a593Smuzhiyun 			&vsk->sk, target, copied, data_read,
1923*4882a593Smuzhiyun 			(struct vmci_transport_recv_notify_data *)data);
1924*4882a593Smuzhiyun }
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun static int vmci_transport_notify_send_init(
1927*4882a593Smuzhiyun 	struct vsock_sock *vsk,
1928*4882a593Smuzhiyun 	struct vsock_transport_send_notify_data *data)
1929*4882a593Smuzhiyun {
1930*4882a593Smuzhiyun 	return vmci_trans(vsk)->notify_ops->send_init(
1931*4882a593Smuzhiyun 			&vsk->sk,
1932*4882a593Smuzhiyun 			(struct vmci_transport_send_notify_data *)data);
1933*4882a593Smuzhiyun }
1934*4882a593Smuzhiyun 
1935*4882a593Smuzhiyun static int vmci_transport_notify_send_pre_block(
1936*4882a593Smuzhiyun 	struct vsock_sock *vsk,
1937*4882a593Smuzhiyun 	struct vsock_transport_send_notify_data *data)
1938*4882a593Smuzhiyun {
1939*4882a593Smuzhiyun 	return vmci_trans(vsk)->notify_ops->send_pre_block(
1940*4882a593Smuzhiyun 			&vsk->sk,
1941*4882a593Smuzhiyun 			(struct vmci_transport_send_notify_data *)data);
1942*4882a593Smuzhiyun }
1943*4882a593Smuzhiyun 
1944*4882a593Smuzhiyun static int vmci_transport_notify_send_pre_enqueue(
1945*4882a593Smuzhiyun 	struct vsock_sock *vsk,
1946*4882a593Smuzhiyun 	struct vsock_transport_send_notify_data *data)
1947*4882a593Smuzhiyun {
1948*4882a593Smuzhiyun 	return vmci_trans(vsk)->notify_ops->send_pre_enqueue(
1949*4882a593Smuzhiyun 			&vsk->sk,
1950*4882a593Smuzhiyun 			(struct vmci_transport_send_notify_data *)data);
1951*4882a593Smuzhiyun }
1952*4882a593Smuzhiyun 
1953*4882a593Smuzhiyun static int vmci_transport_notify_send_post_enqueue(
1954*4882a593Smuzhiyun 	struct vsock_sock *vsk,
1955*4882a593Smuzhiyun 	ssize_t written,
1956*4882a593Smuzhiyun 	struct vsock_transport_send_notify_data *data)
1957*4882a593Smuzhiyun {
1958*4882a593Smuzhiyun 	return vmci_trans(vsk)->notify_ops->send_post_enqueue(
1959*4882a593Smuzhiyun 			&vsk->sk, written,
1960*4882a593Smuzhiyun 			(struct vmci_transport_send_notify_data *)data);
1961*4882a593Smuzhiyun }
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun static bool vmci_transport_old_proto_override(bool *old_pkt_proto)
1964*4882a593Smuzhiyun {
1965*4882a593Smuzhiyun 	if (PROTOCOL_OVERRIDE != -1) {
1966*4882a593Smuzhiyun 		if (PROTOCOL_OVERRIDE == 0)
1967*4882a593Smuzhiyun 			*old_pkt_proto = true;
1968*4882a593Smuzhiyun 		else
1969*4882a593Smuzhiyun 			*old_pkt_proto = false;
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 		pr_info("Proto override in use\n");
1972*4882a593Smuzhiyun 		return true;
1973*4882a593Smuzhiyun 	}
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	return false;
1976*4882a593Smuzhiyun }
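/* PROTOCOL_OVERRIDE (defined earlier in this file) drives both helpers
 * here: -1 means no override, 0 forces the old pkt-based protocol, and a
 * positive value is returned verbatim by
 * vmci_transport_new_proto_supported_versions() below as the bitmask of
 * new protocol versions to advertise.
 */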
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun static bool vmci_transport_proto_to_notify_struct(struct sock *sk,
1979*4882a593Smuzhiyun 						  u16 *proto,
1980*4882a593Smuzhiyun 						  bool old_pkt_proto)
1981*4882a593Smuzhiyun {
1982*4882a593Smuzhiyun 	struct vsock_sock *vsk = vsock_sk(sk);
1983*4882a593Smuzhiyun 
1984*4882a593Smuzhiyun 	if (old_pkt_proto) {
1985*4882a593Smuzhiyun 		if (*proto != VSOCK_PROTO_INVALID) {
1986*4882a593Smuzhiyun 			pr_err("Can't set both an old and new protocol\n");
1987*4882a593Smuzhiyun 			return false;
1988*4882a593Smuzhiyun 		}
1989*4882a593Smuzhiyun 		vmci_trans(vsk)->notify_ops = &vmci_transport_notify_pkt_ops;
1990*4882a593Smuzhiyun 		goto exit;
1991*4882a593Smuzhiyun 	}
1992*4882a593Smuzhiyun 
1993*4882a593Smuzhiyun 	switch (*proto) {
1994*4882a593Smuzhiyun 	case VSOCK_PROTO_PKT_ON_NOTIFY:
1995*4882a593Smuzhiyun 		vmci_trans(vsk)->notify_ops =
1996*4882a593Smuzhiyun 			&vmci_transport_notify_pkt_q_state_ops;
1997*4882a593Smuzhiyun 		break;
1998*4882a593Smuzhiyun 	default:
1999*4882a593Smuzhiyun 		pr_err("Unknown notify protocol version\n");
2000*4882a593Smuzhiyun 		return false;
2001*4882a593Smuzhiyun 	}
2002*4882a593Smuzhiyun 
2003*4882a593Smuzhiyun exit:
2004*4882a593Smuzhiyun 	vmci_trans(vsk)->notify_ops->socket_init(sk);
2005*4882a593Smuzhiyun 	return true;
2006*4882a593Smuzhiyun }
2007*4882a593Smuzhiyun 
2008*4882a593Smuzhiyun static u16 vmci_transport_new_proto_supported_versions(void)
2009*4882a593Smuzhiyun {
2010*4882a593Smuzhiyun 	if (PROTOCOL_OVERRIDE != -1)
2011*4882a593Smuzhiyun 		return PROTOCOL_OVERRIDE;
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 	return VSOCK_PROTO_ALL_SUPPORTED;
2014*4882a593Smuzhiyun }
2015*4882a593Smuzhiyun 
2016*4882a593Smuzhiyun static u32 vmci_transport_get_local_cid(void)
2017*4882a593Smuzhiyun {
2018*4882a593Smuzhiyun 	return vmci_get_context_id();
2019*4882a593Smuzhiyun }
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun static struct vsock_transport vmci_transport = {
2022*4882a593Smuzhiyun 	.module = THIS_MODULE,
2023*4882a593Smuzhiyun 	.init = vmci_transport_socket_init,
2024*4882a593Smuzhiyun 	.destruct = vmci_transport_destruct,
2025*4882a593Smuzhiyun 	.release = vmci_transport_release,
2026*4882a593Smuzhiyun 	.connect = vmci_transport_connect,
2027*4882a593Smuzhiyun 	.dgram_bind = vmci_transport_dgram_bind,
2028*4882a593Smuzhiyun 	.dgram_dequeue = vmci_transport_dgram_dequeue,
2029*4882a593Smuzhiyun 	.dgram_enqueue = vmci_transport_dgram_enqueue,
2030*4882a593Smuzhiyun 	.dgram_allow = vmci_transport_dgram_allow,
2031*4882a593Smuzhiyun 	.stream_dequeue = vmci_transport_stream_dequeue,
2032*4882a593Smuzhiyun 	.stream_enqueue = vmci_transport_stream_enqueue,
2033*4882a593Smuzhiyun 	.stream_has_data = vmci_transport_stream_has_data,
2034*4882a593Smuzhiyun 	.stream_has_space = vmci_transport_stream_has_space,
2035*4882a593Smuzhiyun 	.stream_rcvhiwat = vmci_transport_stream_rcvhiwat,
2036*4882a593Smuzhiyun 	.stream_is_active = vmci_transport_stream_is_active,
2037*4882a593Smuzhiyun 	.stream_allow = vmci_transport_stream_allow,
2038*4882a593Smuzhiyun 	.notify_poll_in = vmci_transport_notify_poll_in,
2039*4882a593Smuzhiyun 	.notify_poll_out = vmci_transport_notify_poll_out,
2040*4882a593Smuzhiyun 	.notify_recv_init = vmci_transport_notify_recv_init,
2041*4882a593Smuzhiyun 	.notify_recv_pre_block = vmci_transport_notify_recv_pre_block,
2042*4882a593Smuzhiyun 	.notify_recv_pre_dequeue = vmci_transport_notify_recv_pre_dequeue,
2043*4882a593Smuzhiyun 	.notify_recv_post_dequeue = vmci_transport_notify_recv_post_dequeue,
2044*4882a593Smuzhiyun 	.notify_send_init = vmci_transport_notify_send_init,
2045*4882a593Smuzhiyun 	.notify_send_pre_block = vmci_transport_notify_send_pre_block,
2046*4882a593Smuzhiyun 	.notify_send_pre_enqueue = vmci_transport_notify_send_pre_enqueue,
2047*4882a593Smuzhiyun 	.notify_send_post_enqueue = vmci_transport_notify_send_post_enqueue,
2048*4882a593Smuzhiyun 	.shutdown = vmci_transport_shutdown,
2049*4882a593Smuzhiyun 	.get_local_cid = vmci_transport_get_local_cid,
2050*4882a593Smuzhiyun };
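/* This ops table is what gets registered with the vsock core below.
 * Because vsock_assign_transport() may pick a different transport for a
 * given remote address, vmci_check_transport() verifies that a pending
 * socket actually landed on this transport before the listen-path
 * handshake continues.
 */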
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun static bool vmci_check_transport(struct vsock_sock *vsk)
2053*4882a593Smuzhiyun {
2054*4882a593Smuzhiyun 	return vsk->transport == &vmci_transport;
2055*4882a593Smuzhiyun }
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun static void vmci_vsock_transport_cb(bool is_host)
2058*4882a593Smuzhiyun {
2059*4882a593Smuzhiyun 	int features;
2060*4882a593Smuzhiyun 
2061*4882a593Smuzhiyun 	if (is_host)
2062*4882a593Smuzhiyun 		features = VSOCK_TRANSPORT_F_H2G;
2063*4882a593Smuzhiyun 	else
2064*4882a593Smuzhiyun 		features = VSOCK_TRANSPORT_F_G2H;
2065*4882a593Smuzhiyun 
2066*4882a593Smuzhiyun 	vsock_core_register(&vmci_transport, features);
2067*4882a593Smuzhiyun }
2068*4882a593Smuzhiyun 
2069*4882a593Smuzhiyun static int __init vmci_transport_init(void)
2070*4882a593Smuzhiyun {
2071*4882a593Smuzhiyun 	int err;
2072*4882a593Smuzhiyun 
2073*4882a593Smuzhiyun 	/* Create the datagram handle that we will use to send and receive all
2074*4882a593Smuzhiyun 	 * VSocket control messages for this context.
2075*4882a593Smuzhiyun 	 */
2076*4882a593Smuzhiyun 	err = vmci_transport_datagram_create_hnd(VMCI_TRANSPORT_PACKET_RID,
2077*4882a593Smuzhiyun 						 VMCI_FLAG_ANYCID_DG_HND,
2078*4882a593Smuzhiyun 						 vmci_transport_recv_stream_cb,
2079*4882a593Smuzhiyun 						 NULL,
2080*4882a593Smuzhiyun 						 &vmci_transport_stream_handle);
2081*4882a593Smuzhiyun 	if (err < VMCI_SUCCESS) {
2082*4882a593Smuzhiyun 		pr_err("Unable to create datagram handle. (%d)\n", err);
2083*4882a593Smuzhiyun 		return vmci_transport_error_to_vsock_error(err);
2084*4882a593Smuzhiyun 	}
2085*4882a593Smuzhiyun 	err = vmci_event_subscribe(VMCI_EVENT_QP_RESUMED,
2086*4882a593Smuzhiyun 				   vmci_transport_qp_resumed_cb,
2087*4882a593Smuzhiyun 				   NULL, &vmci_transport_qp_resumed_sub_id);
2088*4882a593Smuzhiyun 	if (err < VMCI_SUCCESS) {
2089*4882a593Smuzhiyun 		pr_err("Unable to subscribe to resumed event. (%d)\n", err);
2090*4882a593Smuzhiyun 		err = vmci_transport_error_to_vsock_error(err);
2091*4882a593Smuzhiyun 		vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
2092*4882a593Smuzhiyun 		goto err_destroy_stream_handle;
2093*4882a593Smuzhiyun 	}
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun 	/* Register only with dgram feature, other features (H2G, G2H) will be
2096*4882a593Smuzhiyun 	 * registered when the first host or guest becomes active.
2097*4882a593Smuzhiyun 	 */
2098*4882a593Smuzhiyun 	err = vsock_core_register(&vmci_transport, VSOCK_TRANSPORT_F_DGRAM);
2099*4882a593Smuzhiyun 	if (err < 0)
2100*4882a593Smuzhiyun 		goto err_unsubscribe;
2101*4882a593Smuzhiyun 
2102*4882a593Smuzhiyun 	err = vmci_register_vsock_callback(vmci_vsock_transport_cb);
2103*4882a593Smuzhiyun 	if (err < 0)
2104*4882a593Smuzhiyun 		goto err_unregister;
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun 	return 0;
2107*4882a593Smuzhiyun 
2108*4882a593Smuzhiyun err_unregister:
2109*4882a593Smuzhiyun 	vsock_core_unregister(&vmci_transport);
2110*4882a593Smuzhiyun err_unsubscribe:
2111*4882a593Smuzhiyun 	vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
2112*4882a593Smuzhiyun err_destroy_stream_handle:
2113*4882a593Smuzhiyun 	vmci_datagram_destroy_handle(vmci_transport_stream_handle);
2114*4882a593Smuzhiyun 	return err;
2115*4882a593Smuzhiyun }
2116*4882a593Smuzhiyun module_init(vmci_transport_init);
2117*4882a593Smuzhiyun 
2118*4882a593Smuzhiyun static void __exit vmci_transport_exit(void)
2119*4882a593Smuzhiyun {
2120*4882a593Smuzhiyun 	cancel_work_sync(&vmci_transport_cleanup_work);
2121*4882a593Smuzhiyun 	vmci_transport_free_resources(&vmci_transport_cleanup_list);
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun 	if (!vmci_handle_is_invalid(vmci_transport_stream_handle)) {
2124*4882a593Smuzhiyun 		if (vmci_datagram_destroy_handle(
2125*4882a593Smuzhiyun 			vmci_transport_stream_handle) != VMCI_SUCCESS)
2126*4882a593Smuzhiyun 			pr_err("Couldn't destroy datagram handle\n");
2127*4882a593Smuzhiyun 		vmci_transport_stream_handle = VMCI_INVALID_HANDLE;
2128*4882a593Smuzhiyun 	}
2129*4882a593Smuzhiyun 
2130*4882a593Smuzhiyun 	if (vmci_transport_qp_resumed_sub_id != VMCI_INVALID_ID) {
2131*4882a593Smuzhiyun 		vmci_event_unsubscribe(vmci_transport_qp_resumed_sub_id);
2132*4882a593Smuzhiyun 		vmci_transport_qp_resumed_sub_id = VMCI_INVALID_ID;
2133*4882a593Smuzhiyun 	}
2134*4882a593Smuzhiyun 
2135*4882a593Smuzhiyun 	vmci_register_vsock_callback(NULL);
2136*4882a593Smuzhiyun 	vsock_core_unregister(&vmci_transport);
2137*4882a593Smuzhiyun }
2138*4882a593Smuzhiyun module_exit(vmci_transport_exit);
2139*4882a593Smuzhiyun 
2140*4882a593Smuzhiyun MODULE_AUTHOR("VMware, Inc.");
2141*4882a593Smuzhiyun MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
2142*4882a593Smuzhiyun MODULE_VERSION("1.0.5.0-k");
2143*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
2144*4882a593Smuzhiyun MODULE_ALIAS("vmware_vsock");
2145*4882a593Smuzhiyun MODULE_ALIAS_NETPROTO(PF_VSOCK);
2146