// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them.  Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets, and SOCK_DGRAM sockets will always remain in
 * that list.  The bound table is used solely for lookup of sockets when packets
 * are received, and that's not necessary for SOCK_DGRAM sockets since we create
 * a datagram handle for each and need not perform a lookup.  Keeping SOCK_DGRAM
 * sockets out of the bound hash buckets will reduce the chance of collisions
 * when looking for SOCK_STREAM sockets and prevents us from having to check the
 * socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we do
 * not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the TCP_LISTEN state.  When a
 * connection request is received (the second kind of socket mentioned above),
 * we create a new socket and refer to it as a pending socket.  These pending
 * sockets are placed on the pending connection list of the listener socket.
 * When future packets are received for the address the listener socket is
 * bound to, we check if the source of the packet is from one that has an
 * existing pending connection.  If it does, we process the packet for the
 * pending socket.  When that socket reaches the connected state, it is removed
 * from the listener socket's pending list and enqueued in the listener
 * socket's accept queue.  Callers of accept(2) will accept connected sockets
 * from the listener socket's accept queue.  If the socket cannot be accepted
 * for some reason then it is marked rejected.  Once the connection is
 * accepted, it is owned by the user process and the responsibility for cleanup
 * falls with that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request.  Because of this, we must schedule a cleanup function to run in the
 * future, after some amount of time passes where a connection should have been
 * established.  This function ensures that the socket is off all lists so it
 * cannot be retrieved, then drops all references to the socket so it is cleaned
 * up (sock_put() -> sk_free() -> our sk_destruct implementation).  Note this
 * function will also clean up rejected sockets, those that reach the connected
 * state but leave it before they have been accepted.
 *
 * - Lock ordering for pending or accept queue sockets is:
 *
 *     lock_sock(listener);
 *     lock_sock_nested(pending, SINGLE_DEPTH_NESTING);
 *
 * Using explicit nested locking keeps lockdep happy since normally only one
 * lock of a given class may be taken at a time.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called. Our release
 * implementation will perform some cleanup then drop the last reference so our
 * sk_destruct implementation is invoked.  Our sk_destruct implementation will
 * perform additional cleanup that's common for both types of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed.  Each entry in a list (such as the "global" bound and connected tables
 * and the listener socket's pending list and accept queue) ensures a
 * reference.  When we defer work until process context and pass a socket as our
 * argument, we must ensure the reference count is increased to ensure the
 * socket isn't freed before the function is run; the deferred function will
 * then drop the reference.
 *
 * - sk->sk_state uses the TCP state constants because they are widely used by
 * other address families and exposed to userspace tools like ss(8):
 *
 *   TCP_CLOSE - unconnected
 *   TCP_SYN_SENT - connecting
 *   TCP_ESTABLISHED - connected
 *   TCP_CLOSING - disconnecting
 *   TCP_LISTEN - listening
 */

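/* Illustrative only (userspace view, not part of this driver): a minimal
 * stream client as described above, assuming <sys/socket.h> and
 * <linux/vm_sockets.h>, talking to a host-side listener on port 1234:
 *
 *   int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *   struct sockaddr_vm addr = {
 *           .svm_family = AF_VSOCK,
 *           .svm_cid    = VMADDR_CID_HOST,
 *           .svm_port   = 1234,
 *   };
 *
 *   connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * While connect() is in flight the socket sits in TCP_SYN_SENT and moves to
 * TCP_ESTABLISHED once the peer replies; the listener side goes through the
 * pending list and accept queue described above before accept(2) returns it.
 */
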
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>
#include <net/af_vsock.h>

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* Protocol family. */
static struct proto vsock_proto = {
	.name = "AF_VSOCK",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct vsock_sock),
};

/* The default peer timeout indicates how long we will wait for a peer response
 * to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

#define VSOCK_DEFAULT_BUFFER_SIZE     (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MAX_SIZE (1024 * 256)
#define VSOCK_DEFAULT_BUFFER_MIN_SIZE 128

/* Transport used for host->guest communication */
static const struct vsock_transport *transport_h2g;
/* Transport used for guest->host communication */
static const struct vsock_transport *transport_g2h;
/* Transport used for DGRAM communication */
static const struct vsock_transport *transport_dgram;
/* Transport used for local communication */
static const struct vsock_transport *transport_local;
static DEFINE_MUTEX(vsock_register_mutex);

/**** UTILS ****/

/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the hash
 * table (vsock_unbound_sockets).  Bound sockets are added to the hash table in
 * the bucket that their local address hashes to (vsock_bound_sockets(addr)
 * represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets.  The hash function
 * mods with VSOCK_HASH_SIZE to ensure this.
 */
#define MAX_PORT_RETRIES        24

#define VSOCK_HASH(addr)        ((addr)->svm_port % VSOCK_HASH_SIZE)
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets     (&vsock_bind_table[VSOCK_HASH_SIZE])
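
/* For example (illustrative arithmetic only): if VSOCK_HASH_SIZE were 251, a
 * socket bound to port 5000 would hash to bucket 5000 % 251 = 231 of
 * vsock_bind_table, while every unbound socket shares the extra bucket at
 * index VSOCK_HASH_SIZE regardless of its (still unassigned) port.
 */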

/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst)				\
	(((src)->svm_cid ^ (dst)->svm_port) % VSOCK_HASH_SIZE)
#define vsock_connected_sockets(src, dst)		\
	(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk)				\
	vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)

struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
EXPORT_SYMBOL_GPL(vsock_bind_table);
struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
EXPORT_SYMBOL_GPL(vsock_connected_table);
DEFINE_SPINLOCK(vsock_table_lock);
EXPORT_SYMBOL_GPL(vsock_table_lock);

/* Autobind this socket to the local address if necessary. */
static int vsock_auto_bind(struct vsock_sock *vsk)
{
	struct sock *sk = sk_vsock(vsk);
	struct sockaddr_vm local_addr;

	if (vsock_addr_bound(&vsk->local_addr))
		return 0;
	vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	return __vsock_bind(sk, &local_addr);
}

static void vsock_init_tables(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
		INIT_LIST_HEAD(&vsock_bind_table[i]);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
		INIT_LIST_HEAD(&vsock_connected_table[i]);
}

static void __vsock_insert_bound(struct list_head *list,
				 struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
				     struct vsock_sock *vsk)
{
	sock_hold(&vsk->sk);
	list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
	list_del_init(&vsk->bound_table);
	sock_put(&vsk->sk);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
	list_del_init(&vsk->connected_table);
	sock_put(&vsk->sk);
}

static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) {
		if (vsock_addr_equals_addr(addr, &vsk->local_addr))
			return sk_vsock(vsk);

		if (addr->svm_port == vsk->local_addr.svm_port &&
		    (vsk->local_addr.svm_cid == VMADDR_CID_ANY ||
		     addr->svm_cid == VMADDR_CID_ANY))
			return sk_vsock(vsk);
	}

	return NULL;
}

static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
						  struct sockaddr_vm *dst)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
			    connected_table) {
		if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
		    dst->svm_port == vsk->local_addr.svm_port) {
			return sk_vsock(vsk);
		}
	}

	return NULL;
}

static void vsock_insert_unbound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_bound(vsock_unbound_sockets, vsk);
	spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
	struct list_head *list = vsock_connected_sockets(
		&vsk->remote_addr, &vsk->local_addr);

	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_connected(list, vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);

void vsock_remove_bound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_bound_table(vsk))
		__vsock_remove_bound(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	if (__vsock_in_connected_table(vsk))
		__vsock_remove_connected(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);

struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_bound_socket(addr);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
					 struct sockaddr_vm *dst)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_connected_socket(src, dst);
	if (sk)
		sock_hold(sk);

	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);

void vsock_remove_sock(struct vsock_sock *vsk)
{
	vsock_remove_bound(vsk);
	vsock_remove_connected(vsk);
}
EXPORT_SYMBOL_GPL(vsock_remove_sock);

void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
	int i;

	spin_lock_bh(&vsock_table_lock);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
		struct vsock_sock *vsk;
		list_for_each_entry(vsk, &vsock_connected_table[i],
				    connected_table)
			fn(sk_vsock(vsk));
	}

	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);

void vsock_add_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;

	vlistener = vsock_sk(listener);
	vpending = vsock_sk(pending);

	sock_hold(pending);
	sock_hold(listener);
	list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);

void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vpending = vsock_sk(pending);

	list_del_init(&vpending->pending_links);
	sock_put(listener);
	sock_put(pending);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);

void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);
	vconnected = vsock_sk(connected);

	sock_hold(connected);
	sock_hold(listener);
	list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);

static bool vsock_use_local_transport(unsigned int remote_cid)
{
	if (!transport_local)
		return false;

	if (remote_cid == VMADDR_CID_LOCAL)
		return true;

	if (transport_g2h) {
		return remote_cid == transport_g2h->get_local_cid();
	} else {
		return remote_cid == VMADDR_CID_HOST;
	}
}

static void vsock_deassign_transport(struct vsock_sock *vsk)
{
	if (!vsk->transport)
		return;

	vsk->transport->destruct(vsk);
	module_put(vsk->transport->module);
	vsk->transport = NULL;
}

/* Assign a transport to a socket and call the .init transport callback.
 *
 * Note: for stream socket this must be called when vsk->remote_addr is set
 * (e.g. during the connect() or when a connection request on a listener
 * socket is received).
 * The vsk->remote_addr is used to decide which transport to use:
 *  - remote CID == VMADDR_CID_LOCAL or g2h->local_cid or VMADDR_CID_HOST if
 *    g2h is not loaded, will use local transport;
 *  - remote CID <= VMADDR_CID_HOST will use guest->host transport;
 *  - remote CID > VMADDR_CID_HOST will use host->guest transport;
 */
int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
{
	const struct vsock_transport *new_transport;
	struct sock *sk = sk_vsock(vsk);
	unsigned int remote_cid = vsk->remote_addr.svm_cid;
	int ret;

	switch (sk->sk_type) {
	case SOCK_DGRAM:
		new_transport = transport_dgram;
		break;
	case SOCK_STREAM:
		if (vsock_use_local_transport(remote_cid))
			new_transport = transport_local;
		else if (remote_cid <= VMADDR_CID_HOST || !transport_h2g)
			new_transport = transport_g2h;
		else
			new_transport = transport_h2g;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if (vsk->transport) {
		if (vsk->transport == new_transport)
			return 0;

		/* transport->release() must be called with sock lock acquired.
		 * This path can only be taken during vsock_stream_connect(),
		 * where we have already held the sock lock.
		 * In the other cases, this function is called on a new socket
		 * which is not assigned to any transport.
		 */
		vsk->transport->release(vsk);
		vsock_deassign_transport(vsk);
	}

	/* We increase the module refcnt to prevent the transport unloading
	 * while there are open sockets assigned to it.
	 */
	if (!new_transport || !try_module_get(new_transport->module))
		return -ENODEV;

	ret = new_transport->init(vsk, psk);
	if (ret) {
		module_put(new_transport->module);
		return ret;
	}

	vsk->transport = new_transport;

	return 0;
}
EXPORT_SYMBOL_GPL(vsock_assign_transport);
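
/* Illustrative mapping of the rules above (an example, not an extra code
 * path): a guest connecting to VMADDR_CID_HOST (2) falls into the
 * "remote CID <= VMADDR_CID_HOST" case and gets the guest->host transport;
 * the host connecting to a guest CID greater than 2 gets the host->guest
 * transport; and a remote CID of VMADDR_CID_LOCAL (1), or the guest's own
 * CID, selects the loopback transport when one is registered.
 */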

bool vsock_find_cid(unsigned int cid)
{
	if (transport_g2h && cid == transport_g2h->get_local_cid())
		return true;

	if (transport_h2g && cid == VMADDR_CID_HOST)
		return true;

	if (transport_local && cid == VMADDR_CID_LOCAL)
		return true;

	return false;
}
EXPORT_SYMBOL_GPL(vsock_find_cid);

static struct sock *vsock_dequeue_accept(struct sock *listener)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);

	if (list_empty(&vlistener->accept_queue))
		return NULL;

	vconnected = list_entry(vlistener->accept_queue.next,
				struct vsock_sock, accept_queue);

	list_del_init(&vconnected->accept_queue);
	sock_put(listener);
	/* The caller will need a reference on the connected socket so we let
	 * it call sock_put().
	 */

	return sk_vsock(vconnected);
}

static bool vsock_is_accept_queue_empty(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return list_empty(&vsk->accept_queue);
}

static bool vsock_is_pending(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return !list_empty(&vsk->pending_links);
}

static int vsock_send_shutdown(struct sock *sk, int mode)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	if (!vsk->transport)
		return -ENODEV;

	return vsk->transport->shutdown(vsk, mode);
}

static void vsock_pending_work(struct work_struct *work)
{
	struct sock *sk;
	struct sock *listener;
	struct vsock_sock *vsk;
	bool cleanup;

	vsk = container_of(work, struct vsock_sock, pending_work.work);
	sk = sk_vsock(vsk);
	listener = vsk->listener;
	cleanup = true;

	lock_sock(listener);
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (vsock_is_pending(sk)) {
		vsock_remove_pending(listener, sk);

		sk_acceptq_removed(listener);
	} else if (!vsk->rejected) {
		/* We are not on the pending list and accept() did not reject
		 * us, so we must have been accepted by our user process.  We
		 * just need to drop our references to the sockets and be on
		 * our way.
		 */
		cleanup = false;
		goto out;
	}

	/* We need to remove ourself from the global connected sockets list so
	 * incoming packets can't find this socket, and to reduce the reference
	 * count.
	 */
	vsock_remove_connected(vsk);

	sk->sk_state = TCP_CLOSE;

out:
	release_sock(sk);
	release_sock(listener);
	if (cleanup)
		sock_put(sk);

	sock_put(sk);
	sock_put(listener);
}

/**** SOCKET OPERATIONS ****/

static int __vsock_bind_stream(struct vsock_sock *vsk,
			       struct sockaddr_vm *addr)
{
	static u32 port;
	struct sockaddr_vm new_addr;

	if (!port)
		port = LAST_RESERVED_PORT + 1 +
			prandom_u32_max(U32_MAX - LAST_RESERVED_PORT);

	vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

	if (addr->svm_port == VMADDR_PORT_ANY) {
		bool found = false;
		unsigned int i;

		for (i = 0; i < MAX_PORT_RETRIES; i++) {
			if (port <= LAST_RESERVED_PORT)
				port = LAST_RESERVED_PORT + 1;

			new_addr.svm_port = port++;

			if (!__vsock_find_bound_socket(&new_addr)) {
				found = true;
				break;
			}
		}

		if (!found)
			return -EADDRNOTAVAIL;
	} else {
		/* If port is in reserved range, ensure caller
		 * has necessary privileges.
		 */
		if (addr->svm_port <= LAST_RESERVED_PORT &&
		    !capable(CAP_NET_BIND_SERVICE)) {
			return -EACCES;
		}

		if (__vsock_find_bound_socket(&new_addr))
			return -EADDRINUSE;
	}

	vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);

	/* Remove stream sockets from the unbound list and add them to the hash
	 * table for easy lookup by its address.  The unbound list is simply an
	 * extra entry at the end of the hash table, a trick used by AF_UNIX.
	 */
	__vsock_remove_bound(vsk);
	__vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);

	return 0;
}
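
/* Illustrative only (userspace, not part of this file): letting the logic
 * above pick an ephemeral port and reading it back, assuming
 * <linux/vm_sockets.h>:
 *
 *   int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *   struct sockaddr_vm addr = {
 *           .svm_family = AF_VSOCK,
 *           .svm_cid    = VMADDR_CID_ANY,
 *           .svm_port   = VMADDR_PORT_ANY,
 *   };
 *   socklen_t len = sizeof(addr);
 *
 *   bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *   getsockname(fd, (struct sockaddr *)&addr, &len);
 *   // addr.svm_port now holds the port assigned by __vsock_bind_stream().
 *   listen(fd, 8);
 */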

static int __vsock_bind_dgram(struct vsock_sock *vsk,
			      struct sockaddr_vm *addr)
{
	return vsk->transport->dgram_bind(vsk, addr);
}

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	int retval;

	/* First ensure this socket isn't already bound. */
	if (vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	/* Now bind to the provided address or select appropriate values if
	 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY).  Note that
	 * like AF_INET prevents binding to a non-local IP address (in most
	 * cases), we only allow binding to a local CID.
	 */
	if (addr->svm_cid != VMADDR_CID_ANY && !vsock_find_cid(addr->svm_cid))
		return -EADDRNOTAVAIL;

	switch (sk->sk_socket->type) {
	case SOCK_STREAM:
		spin_lock_bh(&vsock_table_lock);
		retval = __vsock_bind_stream(vsk, addr);
		spin_unlock_bh(&vsock_table_lock);
		break;

	case SOCK_DGRAM:
		retval = __vsock_bind_dgram(vsk, addr);
		break;

	default:
		retval = -EINVAL;
		break;
	}

	return retval;
}

static void vsock_connect_timeout(struct work_struct *work);

static struct sock *__vsock_create(struct net *net,
				   struct socket *sock,
				   struct sock *parent,
				   gfp_t priority,
				   unsigned short type,
				   int kern)
{
	struct sock *sk;
	struct vsock_sock *psk;
	struct vsock_sock *vsk;

	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto, kern);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);

	/* sk->sk_type is normally set in sock_init_data, but only if sock is
	 * non-NULL. We make sure that our sockets always have a type by
	 * setting it here if needed.
	 */
	if (!sock)
		sk->sk_type = type;

	vsk = vsock_sk(sk);
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	sk->sk_destruct = vsock_sk_destruct;
	sk->sk_backlog_rcv = vsock_queue_rcv_skb;
	sock_reset_flag(sk, SOCK_DONE);

	INIT_LIST_HEAD(&vsk->bound_table);
	INIT_LIST_HEAD(&vsk->connected_table);
	vsk->listener = NULL;
	INIT_LIST_HEAD(&vsk->pending_links);
	INIT_LIST_HEAD(&vsk->accept_queue);
	vsk->rejected = false;
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;
	vsk->peer_shutdown = 0;
	INIT_DELAYED_WORK(&vsk->connect_work, vsock_connect_timeout);
	INIT_DELAYED_WORK(&vsk->pending_work, vsock_pending_work);

	psk = parent ? vsock_sk(parent) : NULL;
	if (parent) {
		vsk->trusted = psk->trusted;
		vsk->owner = get_cred(psk->owner);
		vsk->connect_timeout = psk->connect_timeout;
		vsk->buffer_size = psk->buffer_size;
		vsk->buffer_min_size = psk->buffer_min_size;
		vsk->buffer_max_size = psk->buffer_max_size;
		security_sk_clone(parent, sk);
	} else {
		vsk->trusted = ns_capable_noaudit(&init_user_ns, CAP_NET_ADMIN);
		vsk->owner = get_current_cred();
		vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
		vsk->buffer_size = VSOCK_DEFAULT_BUFFER_SIZE;
		vsk->buffer_min_size = VSOCK_DEFAULT_BUFFER_MIN_SIZE;
		vsk->buffer_max_size = VSOCK_DEFAULT_BUFFER_MAX_SIZE;
	}

	return sk;
}

static void __vsock_release(struct sock *sk, int level)
{
	if (sk) {
		struct sock *pending;
		struct vsock_sock *vsk;

		vsk = vsock_sk(sk);
		pending = NULL;	/* Compiler warning. */

		/* When "level" is SINGLE_DEPTH_NESTING, use the nested
		 * version to avoid the warning "possible recursive locking
		 * detected". When "level" is 0, lock_sock_nested(sk, level)
		 * is the same as lock_sock(sk).
		 */
		lock_sock_nested(sk, level);

		if (vsk->transport)
			vsk->transport->release(vsk);
		else if (sk->sk_type == SOCK_STREAM)
			vsock_remove_sock(vsk);

		sock_orphan(sk);
		sk->sk_shutdown = SHUTDOWN_MASK;

		skb_queue_purge(&sk->sk_receive_queue);

		/* Clean up any sockets that never were accepted. */
		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
			__vsock_release(pending, SINGLE_DEPTH_NESTING);
			sock_put(pending);
		}

		release_sock(sk);
		sock_put(sk);
	}
}

static void vsock_sk_destruct(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	vsock_deassign_transport(vsk);

	/* When clearing these addresses, there's no need to set the family and
	 * possibly register the address family with the kernel.
	 */
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	put_cred(vsk->owner);
}

static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	return err;
}

struct sock *vsock_create_connected(struct sock *parent)
{
	return __vsock_create(sock_net(parent), NULL, parent, GFP_KERNEL,
			      parent->sk_type, 0);
}
EXPORT_SYMBOL_GPL(vsock_create_connected);

s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
	return vsk->transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);

s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
	return vsk->transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);

static int vsock_release(struct socket *sock)
{
	__vsock_release(sock->sk, 0);
	sock->sk = NULL;
	sock->state = SS_FREE;

	return 0;
}

static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	int err;
	struct sock *sk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;

	if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
		return -EINVAL;

	lock_sock(sk);
	err = __vsock_bind(sk, vm_addr);
	release_sock(sk);

	return err;
}

static int vsock_getname(struct socket *sock,
			 struct sockaddr *addr, int peer)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (peer) {
		if (sock->state != SS_CONNECTED) {
			err = -ENOTCONN;
			goto out;
		}
		vm_addr = &vsk->remote_addr;
	} else {
		vm_addr = &vsk->local_addr;
	}

	if (!vm_addr) {
		err = -EINVAL;
		goto out;
	}

	/* sys_getsockname() and sys_getpeername() pass us a
	 * MAX_SOCK_ADDR-sized buffer and don't set addr_len.  Unfortunately
	 * that macro is defined in socket.c instead of .h, so we hardcode its
	 * value here.
	 */
	BUILD_BUG_ON(sizeof(*vm_addr) > 128);
	memcpy(addr, vm_addr, sizeof(*vm_addr));
	err = sizeof(*vm_addr);

out:
	release_sock(sk);
	return err;
}

static int vsock_shutdown(struct socket *sock, int mode)
{
	int err;
	struct sock *sk;

	/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
	 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
	 * here like the other address families do.  Note also that the
	 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
	 * which is what we want.
	 */
	mode++;

	if ((mode & ~SHUTDOWN_MASK) || !mode)
		return -EINVAL;

	/* If this is a STREAM socket and it is not connected then bail out
	 * immediately.  If it is a DGRAM socket then we must first kick the
	 * socket so that it wakes up from any sleeping calls, for example
	 * recv(), and then afterwards return the error.
	 */

	sk = sock->sk;

	lock_sock(sk);
	if (sock->state == SS_UNCONNECTED) {
		err = -ENOTCONN;
		if (sk->sk_type == SOCK_STREAM)
			goto out;
	} else {
		sock->state = SS_DISCONNECTING;
		err = 0;
	}

	/* Receive and send shutdowns are treated alike. */
	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
	if (mode) {
		sk->sk_shutdown |= mode;
		sk->sk_state_change(sk);

		if (sk->sk_type == SOCK_STREAM) {
			sock_reset_flag(sk, SOCK_DONE);
			vsock_send_shutdown(sk, mode);
		}
	}

out:
	release_sock(sk);
	return err;
}

static __poll_t vsock_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	struct sock *sk;
	__poll_t mask;
	struct vsock_sock *vsk;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (sk->sk_err)
		/* Signify that there has been an error on this socket. */
		mask |= EPOLLERR;

	/* INET sockets treat local write shutdown and peer write shutdown as a
	 * case of EPOLLHUP set.
	 */
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    ((sk->sk_shutdown & SEND_SHUTDOWN) &&
	     (vsk->peer_shutdown & SEND_SHUTDOWN))) {
		mask |= EPOLLHUP;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN ||
	    vsk->peer_shutdown & SEND_SHUTDOWN) {
		mask |= EPOLLRDHUP;
	}

	if (sock->type == SOCK_DGRAM) {
		/* For datagram sockets we can read if there is something in
		 * the queue and write as long as the socket isn't shutdown for
		 * sending.
		 */
		if (!skb_queue_empty_lockless(&sk->sk_receive_queue) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		if (!(sk->sk_shutdown & SEND_SHUTDOWN))
			mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;

	} else if (sock->type == SOCK_STREAM) {
		const struct vsock_transport *transport;

		lock_sock(sk);

		transport = vsk->transport;

		/* Listening sockets that have connections in their accept
		 * queue can be read.
		 */
		if (sk->sk_state == TCP_LISTEN
		    && !vsock_is_accept_queue_empty(sk))
			mask |= EPOLLIN | EPOLLRDNORM;

		/* If there is something in the queue then we can read. */
		if (transport && transport->stream_is_active(vsk) &&
		    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
			bool data_ready_now = false;
			int ret = transport->notify_poll_in(
					vsk, 1, &data_ready_now);
			if (ret < 0) {
				mask |= EPOLLERR;
			} else {
				if (data_ready_now)
					mask |= EPOLLIN | EPOLLRDNORM;

			}
		}

		/* Sockets whose connections have been closed, reset, or
		 * terminated should also be considered read, and we check the
		 * shutdown flag for that.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN ||
		    vsk->peer_shutdown & SEND_SHUTDOWN) {
			mask |= EPOLLIN | EPOLLRDNORM;
		}

		/* Connected sockets that can produce data can be written. */
		if (transport && sk->sk_state == TCP_ESTABLISHED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
				bool space_avail_now = false;
				int ret = transport->notify_poll_out(
						vsk, 1, &space_avail_now);
				if (ret < 0) {
					mask |= EPOLLERR;
				} else {
					if (space_avail_now)
						/* Remove EPOLLWRBAND since INET
						 * sockets are not setting it.
						 */
						mask |= EPOLLOUT | EPOLLWRNORM;

				}
			}
		}

		/* Simulate INET socket poll behaviors, which sets
		 * EPOLLOUT|EPOLLWRNORM when peer is closed and nothing to read,
		 * but local send is not shutdown.
		 */
		if (sk->sk_state == TCP_CLOSE || sk->sk_state == TCP_CLOSING) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				mask |= EPOLLOUT | EPOLLWRNORM;

		}

		release_sock(sk);
	}

	return mask;
}

static int vsock_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;
	const struct vsock_transport *transport;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* For now, MSG_DONTWAIT is always assumed... */
	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	transport = vsk->transport;

	err = vsock_auto_bind(vsk);
	if (err)
		goto out;


	/* If the provided message contains an address, use that.  Otherwise
	 * fall back on the socket's remote handle (if it has been connected).
	 */
	if (msg->msg_name &&
	    vsock_addr_cast(msg->msg_name, msg->msg_namelen,
			    &remote_addr) == 0) {
		/* Ensure this address is of the right type and is a valid
		 * destination.
		 */

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		if (!vsock_addr_bound(remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else if (sock->state == SS_CONNECTED) {
		remote_addr = &vsk->remote_addr;

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		/* XXX Should connect() or this function ensure remote_addr is
		 * bound?
		 */
		if (!vsock_addr_bound(&vsk->remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else {
		err = -EINVAL;
		goto out;
	}

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	err = transport->dgram_enqueue(vsk, remote_addr, msg, len);

out:
	release_sock(sk);
	return err;
}
1148*4882a593Smuzhiyun 
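/* Datagram connect(2): an address whose svm_family is AF_UNSPEC dissolves any
 * existing association and returns the socket to SS_UNCONNECTED; any other
 * failure to cast the address is reported as -EINVAL.
 */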
1149*4882a593Smuzhiyun static int vsock_dgram_connect(struct socket *sock,
1150*4882a593Smuzhiyun 			       struct sockaddr *addr, int addr_len, int flags)
1151*4882a593Smuzhiyun {
1152*4882a593Smuzhiyun 	int err;
1153*4882a593Smuzhiyun 	struct sock *sk;
1154*4882a593Smuzhiyun 	struct vsock_sock *vsk;
1155*4882a593Smuzhiyun 	struct sockaddr_vm *remote_addr;
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 	sk = sock->sk;
1158*4882a593Smuzhiyun 	vsk = vsock_sk(sk);
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 	err = vsock_addr_cast(addr, addr_len, &remote_addr);
1161*4882a593Smuzhiyun 	if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
1162*4882a593Smuzhiyun 		lock_sock(sk);
1163*4882a593Smuzhiyun 		vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
1164*4882a593Smuzhiyun 				VMADDR_PORT_ANY);
1165*4882a593Smuzhiyun 		sock->state = SS_UNCONNECTED;
1166*4882a593Smuzhiyun 		release_sock(sk);
1167*4882a593Smuzhiyun 		return 0;
1168*4882a593Smuzhiyun 	} else if (err != 0)
1169*4882a593Smuzhiyun 		return -EINVAL;
1170*4882a593Smuzhiyun 
1171*4882a593Smuzhiyun 	lock_sock(sk);
1172*4882a593Smuzhiyun 
1173*4882a593Smuzhiyun 	err = vsock_auto_bind(vsk);
1174*4882a593Smuzhiyun 	if (err)
1175*4882a593Smuzhiyun 		goto out;
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 	if (!vsk->transport->dgram_allow(remote_addr->svm_cid,
1178*4882a593Smuzhiyun 					 remote_addr->svm_port)) {
1179*4882a593Smuzhiyun 		err = -EINVAL;
1180*4882a593Smuzhiyun 		goto out;
1181*4882a593Smuzhiyun 	}
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
1184*4882a593Smuzhiyun 	sock->state = SS_CONNECTED;
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun out:
1187*4882a593Smuzhiyun 	release_sock(sk);
1188*4882a593Smuzhiyun 	return err;
1189*4882a593Smuzhiyun }
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun static int vsock_dgram_recvmsg(struct socket *sock, struct msghdr *msg,
1192*4882a593Smuzhiyun 			       size_t len, int flags)
1193*4882a593Smuzhiyun {
1194*4882a593Smuzhiyun 	struct vsock_sock *vsk = vsock_sk(sock->sk);
1195*4882a593Smuzhiyun 
1196*4882a593Smuzhiyun 	return vsk->transport->dgram_dequeue(vsk, msg, len, flags);
1197*4882a593Smuzhiyun }
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun static const struct proto_ops vsock_dgram_ops = {
1200*4882a593Smuzhiyun 	.family = PF_VSOCK,
1201*4882a593Smuzhiyun 	.owner = THIS_MODULE,
1202*4882a593Smuzhiyun 	.release = vsock_release,
1203*4882a593Smuzhiyun 	.bind = vsock_bind,
1204*4882a593Smuzhiyun 	.connect = vsock_dgram_connect,
1205*4882a593Smuzhiyun 	.socketpair = sock_no_socketpair,
1206*4882a593Smuzhiyun 	.accept = sock_no_accept,
1207*4882a593Smuzhiyun 	.getname = vsock_getname,
1208*4882a593Smuzhiyun 	.poll = vsock_poll,
1209*4882a593Smuzhiyun 	.ioctl = sock_no_ioctl,
1210*4882a593Smuzhiyun 	.listen = sock_no_listen,
1211*4882a593Smuzhiyun 	.shutdown = vsock_shutdown,
1212*4882a593Smuzhiyun 	.sendmsg = vsock_dgram_sendmsg,
1213*4882a593Smuzhiyun 	.recvmsg = vsock_dgram_recvmsg,
1214*4882a593Smuzhiyun 	.mmap = sock_no_mmap,
1215*4882a593Smuzhiyun 	.sendpage = sock_no_sendpage,
1216*4882a593Smuzhiyun };
1217*4882a593Smuzhiyun 
1218*4882a593Smuzhiyun static int vsock_transport_cancel_pkt(struct vsock_sock *vsk)
1219*4882a593Smuzhiyun {
1220*4882a593Smuzhiyun 	const struct vsock_transport *transport = vsk->transport;
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	if (!transport || !transport->cancel_pkt)
1223*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	return transport->cancel_pkt(vsk);
1226*4882a593Smuzhiyun }
1227*4882a593Smuzhiyun 
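/* Delayed-work handler armed by non-blocking vsock_stream_connect() calls.
 * If the socket is still in TCP_SYN_SENT when the timer fires, the connect
 * attempt is failed with ETIMEDOUT and any in-flight connection request is
 * cancelled through the transport.
 */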
1228*4882a593Smuzhiyun static void vsock_connect_timeout(struct work_struct *work)
1229*4882a593Smuzhiyun {
1230*4882a593Smuzhiyun 	struct sock *sk;
1231*4882a593Smuzhiyun 	struct vsock_sock *vsk;
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 	vsk = container_of(work, struct vsock_sock, connect_work.work);
1234*4882a593Smuzhiyun 	sk = sk_vsock(vsk);
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	lock_sock(sk);
1237*4882a593Smuzhiyun 	if (sk->sk_state == TCP_SYN_SENT &&
1238*4882a593Smuzhiyun 	    (sk->sk_shutdown != SHUTDOWN_MASK)) {
1239*4882a593Smuzhiyun 		sk->sk_state = TCP_CLOSE;
1240*4882a593Smuzhiyun 		sk->sk_socket->state = SS_UNCONNECTED;
1241*4882a593Smuzhiyun 		sk->sk_err = ETIMEDOUT;
1242*4882a593Smuzhiyun 		sk->sk_error_report(sk);
1243*4882a593Smuzhiyun 		vsock_transport_cancel_pkt(vsk);
1244*4882a593Smuzhiyun 	}
1245*4882a593Smuzhiyun 	release_sock(sk);
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 	sock_put(sk);
1248*4882a593Smuzhiyun }
1249*4882a593Smuzhiyun 
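/* Connection establishment for stream sockets: the transport is assigned
 * from the remote CID, a connection request is issued, and the caller then
 * waits for the receive path to move the socket to TCP_ESTABLISHED.  A
 * minimal userspace sketch of a non-blocking connect (CID/port values are
 * illustrative assumptions):
 *
 *	struct sockaddr_vm addr = { .svm_family = AF_VSOCK,
 *				    .svm_cid = 2, .svm_port = 5000 };
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	fcntl(fd, F_SETFL, O_NONBLOCK);
 *	connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *
 * With O_NONBLOCK this is expected to fail with EINPROGRESS; completion is
 * then signalled via EPOLLOUT from vsock_poll().
 */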
1250*4882a593Smuzhiyun static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
1251*4882a593Smuzhiyun 				int addr_len, int flags)
1252*4882a593Smuzhiyun {
1253*4882a593Smuzhiyun 	int err;
1254*4882a593Smuzhiyun 	struct sock *sk;
1255*4882a593Smuzhiyun 	struct vsock_sock *vsk;
1256*4882a593Smuzhiyun 	const struct vsock_transport *transport;
1257*4882a593Smuzhiyun 	struct sockaddr_vm *remote_addr;
1258*4882a593Smuzhiyun 	long timeout;
1259*4882a593Smuzhiyun 	DEFINE_WAIT(wait);
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun 	err = 0;
1262*4882a593Smuzhiyun 	sk = sock->sk;
1263*4882a593Smuzhiyun 	vsk = vsock_sk(sk);
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun 	lock_sock(sk);
1266*4882a593Smuzhiyun 
1267*4882a593Smuzhiyun 	/* XXX AF_UNSPEC should make us disconnect like AF_INET. */
1268*4882a593Smuzhiyun 	switch (sock->state) {
1269*4882a593Smuzhiyun 	case SS_CONNECTED:
1270*4882a593Smuzhiyun 		err = -EISCONN;
1271*4882a593Smuzhiyun 		goto out;
1272*4882a593Smuzhiyun 	case SS_DISCONNECTING:
1273*4882a593Smuzhiyun 		err = -EINVAL;
1274*4882a593Smuzhiyun 		goto out;
1275*4882a593Smuzhiyun 	case SS_CONNECTING:
1276*4882a593Smuzhiyun 		/* This continues on so we can move sock into the SS_CONNECTED
1277*4882a593Smuzhiyun 		 * state once the connection has completed (at which point err
1278*4882a593Smuzhiyun 		 * will be set to zero also).  Otherwise, we will either wait
1279*4882a593Smuzhiyun 		 * for the connection or return -EALREADY should this be a
1280*4882a593Smuzhiyun 		 * non-blocking call.
1281*4882a593Smuzhiyun 		 */
1282*4882a593Smuzhiyun 		err = -EALREADY;
1283*4882a593Smuzhiyun 		if (flags & O_NONBLOCK)
1284*4882a593Smuzhiyun 			goto out;
1285*4882a593Smuzhiyun 		break;
1286*4882a593Smuzhiyun 	default:
1287*4882a593Smuzhiyun 		if ((sk->sk_state == TCP_LISTEN) ||
1288*4882a593Smuzhiyun 		    vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
1289*4882a593Smuzhiyun 			err = -EINVAL;
1290*4882a593Smuzhiyun 			goto out;
1291*4882a593Smuzhiyun 		}
1292*4882a593Smuzhiyun 
1293*4882a593Smuzhiyun 		/* Set the remote address that we are connecting to. */
1294*4882a593Smuzhiyun 		memcpy(&vsk->remote_addr, remote_addr,
1295*4882a593Smuzhiyun 		       sizeof(vsk->remote_addr));
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun 		err = vsock_assign_transport(vsk, NULL);
1298*4882a593Smuzhiyun 		if (err)
1299*4882a593Smuzhiyun 			goto out;
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 		transport = vsk->transport;
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 		/* The hypervisor and well-known contexts do not have socket
1304*4882a593Smuzhiyun 		 * endpoints.
1305*4882a593Smuzhiyun 		 */
1306*4882a593Smuzhiyun 		if (!transport ||
1307*4882a593Smuzhiyun 		    !transport->stream_allow(remote_addr->svm_cid,
1308*4882a593Smuzhiyun 					     remote_addr->svm_port)) {
1309*4882a593Smuzhiyun 			err = -ENETUNREACH;
1310*4882a593Smuzhiyun 			goto out;
1311*4882a593Smuzhiyun 		}
1312*4882a593Smuzhiyun 
1313*4882a593Smuzhiyun 		err = vsock_auto_bind(vsk);
1314*4882a593Smuzhiyun 		if (err)
1315*4882a593Smuzhiyun 			goto out;
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 		sk->sk_state = TCP_SYN_SENT;
1318*4882a593Smuzhiyun 
1319*4882a593Smuzhiyun 		err = transport->connect(vsk);
1320*4882a593Smuzhiyun 		if (err < 0)
1321*4882a593Smuzhiyun 			goto out;
1322*4882a593Smuzhiyun 
1323*4882a593Smuzhiyun 		/* Mark sock as connecting and set the error code to in
1324*4882a593Smuzhiyun 		 * progress in case this is a non-blocking connect.
1325*4882a593Smuzhiyun 		 */
1326*4882a593Smuzhiyun 		sock->state = SS_CONNECTING;
1327*4882a593Smuzhiyun 		err = -EINPROGRESS;
1328*4882a593Smuzhiyun 	}
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	/* The receive path will handle all communication until we are able to
1331*4882a593Smuzhiyun 	 * enter the connected state.  Here we wait for the connection to be
1332*4882a593Smuzhiyun 	 * completed or a notification of an error.
1333*4882a593Smuzhiyun 	 */
1334*4882a593Smuzhiyun 	timeout = vsk->connect_timeout;
1335*4882a593Smuzhiyun 	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun 	while (sk->sk_state != TCP_ESTABLISHED && sk->sk_err == 0) {
1338*4882a593Smuzhiyun 		if (flags & O_NONBLOCK) {
1339*4882a593Smuzhiyun 			/* If we're not going to block, we schedule a timeout
1340*4882a593Smuzhiyun 			 * function to generate a timeout on the connection
1341*4882a593Smuzhiyun 			 * attempt, in case the peer doesn't respond in a
1342*4882a593Smuzhiyun 			 * timely manner. We hold on to the socket until the
1343*4882a593Smuzhiyun 			 * timeout fires.
1344*4882a593Smuzhiyun 			 */
1345*4882a593Smuzhiyun 			sock_hold(sk);
1346*4882a593Smuzhiyun 
1347*4882a593Smuzhiyun 			/* If the timeout function is already scheduled,
1348*4882a593Smuzhiyun 			 * reschedule it, then ungrab the socket refcount to
1349*4882a593Smuzhiyun 			 * keep it balanced.
1350*4882a593Smuzhiyun 			 */
1351*4882a593Smuzhiyun 			if (mod_delayed_work(system_wq, &vsk->connect_work,
1352*4882a593Smuzhiyun 					     timeout))
1353*4882a593Smuzhiyun 				sock_put(sk);
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 			/* Skip ahead to preserve error code set above. */
1356*4882a593Smuzhiyun 			goto out_wait;
1357*4882a593Smuzhiyun 		}
1358*4882a593Smuzhiyun 
1359*4882a593Smuzhiyun 		release_sock(sk);
1360*4882a593Smuzhiyun 		timeout = schedule_timeout(timeout);
1361*4882a593Smuzhiyun 		lock_sock(sk);
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 		if (signal_pending(current)) {
1364*4882a593Smuzhiyun 			err = sock_intr_errno(timeout);
1365*4882a593Smuzhiyun 			sk->sk_state = sk->sk_state == TCP_ESTABLISHED ? TCP_CLOSING : TCP_CLOSE;
1366*4882a593Smuzhiyun 			sock->state = SS_UNCONNECTED;
1367*4882a593Smuzhiyun 			vsock_transport_cancel_pkt(vsk);
1368*4882a593Smuzhiyun 			vsock_remove_connected(vsk);
1369*4882a593Smuzhiyun 			goto out_wait;
1370*4882a593Smuzhiyun 		} else if (timeout == 0) {
1371*4882a593Smuzhiyun 			err = -ETIMEDOUT;
1372*4882a593Smuzhiyun 			sk->sk_state = TCP_CLOSE;
1373*4882a593Smuzhiyun 			sock->state = SS_UNCONNECTED;
1374*4882a593Smuzhiyun 			vsock_transport_cancel_pkt(vsk);
1375*4882a593Smuzhiyun 			goto out_wait;
1376*4882a593Smuzhiyun 		}
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1379*4882a593Smuzhiyun 	}
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	if (sk->sk_err) {
1382*4882a593Smuzhiyun 		err = -sk->sk_err;
1383*4882a593Smuzhiyun 		sk->sk_state = TCP_CLOSE;
1384*4882a593Smuzhiyun 		sock->state = SS_UNCONNECTED;
1385*4882a593Smuzhiyun 	} else {
1386*4882a593Smuzhiyun 		err = 0;
1387*4882a593Smuzhiyun 	}
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun out_wait:
1390*4882a593Smuzhiyun 	finish_wait(sk_sleep(sk), &wait);
1391*4882a593Smuzhiyun out:
1392*4882a593Smuzhiyun 	release_sock(sk);
1393*4882a593Smuzhiyun 	return err;
1394*4882a593Smuzhiyun }
1395*4882a593Smuzhiyun 
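/* accept(2): connected child sockets are created by the transport receive
 * path and queued on the listener; here we only wait for one to appear
 * (honouring the listener's receive timeout and O_NONBLOCK) and graft it
 * onto the new socket, or mark it rejected if the listener saw an error.
 */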
1396*4882a593Smuzhiyun static int vsock_accept(struct socket *sock, struct socket *newsock, int flags,
1397*4882a593Smuzhiyun 			bool kern)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun 	struct sock *listener;
1400*4882a593Smuzhiyun 	int err;
1401*4882a593Smuzhiyun 	struct sock *connected;
1402*4882a593Smuzhiyun 	struct vsock_sock *vconnected;
1403*4882a593Smuzhiyun 	long timeout;
1404*4882a593Smuzhiyun 	DEFINE_WAIT(wait);
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	err = 0;
1407*4882a593Smuzhiyun 	listener = sock->sk;
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	lock_sock(listener);
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun 	if (sock->type != SOCK_STREAM) {
1412*4882a593Smuzhiyun 		err = -EOPNOTSUPP;
1413*4882a593Smuzhiyun 		goto out;
1414*4882a593Smuzhiyun 	}
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 	if (listener->sk_state != TCP_LISTEN) {
1417*4882a593Smuzhiyun 		err = -EINVAL;
1418*4882a593Smuzhiyun 		goto out;
1419*4882a593Smuzhiyun 	}
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun 	/* Wait for child sockets to appear; these are the new sockets
1422*4882a593Smuzhiyun 	 * created upon connection establishment.
1423*4882a593Smuzhiyun 	 */
1424*4882a593Smuzhiyun 	timeout = sock_rcvtimeo(listener, flags & O_NONBLOCK);
1425*4882a593Smuzhiyun 	prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1426*4882a593Smuzhiyun 
1427*4882a593Smuzhiyun 	while ((connected = vsock_dequeue_accept(listener)) == NULL &&
1428*4882a593Smuzhiyun 	       listener->sk_err == 0) {
1429*4882a593Smuzhiyun 		release_sock(listener);
1430*4882a593Smuzhiyun 		timeout = schedule_timeout(timeout);
1431*4882a593Smuzhiyun 		finish_wait(sk_sleep(listener), &wait);
1432*4882a593Smuzhiyun 		lock_sock(listener);
1433*4882a593Smuzhiyun 
1434*4882a593Smuzhiyun 		if (signal_pending(current)) {
1435*4882a593Smuzhiyun 			err = sock_intr_errno(timeout);
1436*4882a593Smuzhiyun 			goto out;
1437*4882a593Smuzhiyun 		} else if (timeout == 0) {
1438*4882a593Smuzhiyun 			err = -EAGAIN;
1439*4882a593Smuzhiyun 			goto out;
1440*4882a593Smuzhiyun 		}
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 		prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
1443*4882a593Smuzhiyun 	}
1444*4882a593Smuzhiyun 	finish_wait(sk_sleep(listener), &wait);
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 	if (listener->sk_err)
1447*4882a593Smuzhiyun 		err = -listener->sk_err;
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	if (connected) {
1450*4882a593Smuzhiyun 		sk_acceptq_removed(listener);
1451*4882a593Smuzhiyun 
1452*4882a593Smuzhiyun 		lock_sock_nested(connected, SINGLE_DEPTH_NESTING);
1453*4882a593Smuzhiyun 		vconnected = vsock_sk(connected);
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 		/* If the listener socket has received an error, then we should
1456*4882a593Smuzhiyun 		 * reject this socket and return.  Note that we simply mark the
1457*4882a593Smuzhiyun 		 * socket rejected, drop our reference, and let the cleanup
1458*4882a593Smuzhiyun 		 * function handle the cleanup; the fact that we found it in
1459*4882a593Smuzhiyun 		 * the listener's accept queue guarantees that the cleanup
1460*4882a593Smuzhiyun 		 * function hasn't run yet.
1461*4882a593Smuzhiyun 		 */
1462*4882a593Smuzhiyun 		if (err) {
1463*4882a593Smuzhiyun 			vconnected->rejected = true;
1464*4882a593Smuzhiyun 		} else {
1465*4882a593Smuzhiyun 			newsock->state = SS_CONNECTED;
1466*4882a593Smuzhiyun 			sock_graft(connected, newsock);
1467*4882a593Smuzhiyun 		}
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun 		release_sock(connected);
1470*4882a593Smuzhiyun 		sock_put(connected);
1471*4882a593Smuzhiyun 	}
1472*4882a593Smuzhiyun 
1473*4882a593Smuzhiyun out:
1474*4882a593Smuzhiyun 	release_sock(listener);
1475*4882a593Smuzhiyun 	return err;
1476*4882a593Smuzhiyun }
1477*4882a593Smuzhiyun 
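/* listen(2): only a bound SOCK_STREAM socket in SS_UNCONNECTED may enter
 * TCP_LISTEN; the requested backlog is recorded in sk_max_ack_backlog.
 */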
1478*4882a593Smuzhiyun static int vsock_listen(struct socket *sock, int backlog)
1479*4882a593Smuzhiyun {
1480*4882a593Smuzhiyun 	int err;
1481*4882a593Smuzhiyun 	struct sock *sk;
1482*4882a593Smuzhiyun 	struct vsock_sock *vsk;
1483*4882a593Smuzhiyun 
1484*4882a593Smuzhiyun 	sk = sock->sk;
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	lock_sock(sk);
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun 	if (sock->type != SOCK_STREAM) {
1489*4882a593Smuzhiyun 		err = -EOPNOTSUPP;
1490*4882a593Smuzhiyun 		goto out;
1491*4882a593Smuzhiyun 	}
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 	if (sock->state != SS_UNCONNECTED) {
1494*4882a593Smuzhiyun 		err = -EINVAL;
1495*4882a593Smuzhiyun 		goto out;
1496*4882a593Smuzhiyun 	}
1497*4882a593Smuzhiyun 
1498*4882a593Smuzhiyun 	vsk = vsock_sk(sk);
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 	if (!vsock_addr_bound(&vsk->local_addr)) {
1501*4882a593Smuzhiyun 		err = -EINVAL;
1502*4882a593Smuzhiyun 		goto out;
1503*4882a593Smuzhiyun 	}
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun 	sk->sk_max_ack_backlog = backlog;
1506*4882a593Smuzhiyun 	sk->sk_state = TCP_LISTEN;
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun 	err = 0;
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun out:
1511*4882a593Smuzhiyun 	release_sock(sk);
1512*4882a593Smuzhiyun 	return err;
1513*4882a593Smuzhiyun }
1514*4882a593Smuzhiyun 
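/* Clamp a requested buffer size to [buffer_min_size, buffer_max_size] and,
 * when the value actually changes, let the transport adjust it further via
 * notify_buffer_size() before it is stored in the socket.
 */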
1515*4882a593Smuzhiyun static void vsock_update_buffer_size(struct vsock_sock *vsk,
1516*4882a593Smuzhiyun 				     const struct vsock_transport *transport,
1517*4882a593Smuzhiyun 				     u64 val)
1518*4882a593Smuzhiyun {
1519*4882a593Smuzhiyun 	if (val > vsk->buffer_max_size)
1520*4882a593Smuzhiyun 		val = vsk->buffer_max_size;
1521*4882a593Smuzhiyun 
1522*4882a593Smuzhiyun 	if (val < vsk->buffer_min_size)
1523*4882a593Smuzhiyun 		val = vsk->buffer_min_size;
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 	if (val != vsk->buffer_size &&
1526*4882a593Smuzhiyun 	    transport && transport->notify_buffer_size)
1527*4882a593Smuzhiyun 		transport->notify_buffer_size(vsk, &val);
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	vsk->buffer_size = val;
1530*4882a593Smuzhiyun }
1531*4882a593Smuzhiyun 
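/* AF_VSOCK-level stream socket options.  A minimal userspace sketch of
 * resizing the buffer (the size value is an illustrative assumption):
 *
 *	uint64_t size = 256 * 1024;
 *	setsockopt(fd, AF_VSOCK, SO_VM_SOCKETS_BUFFER_SIZE, &size, sizeof(size));
 *
 * The buffer options take a 64-bit value; SO_VM_SOCKETS_CONNECT_TIMEOUT takes
 * a struct timeval-style value (struct __kernel_old_timeval here).
 */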
1532*4882a593Smuzhiyun static int vsock_stream_setsockopt(struct socket *sock,
1533*4882a593Smuzhiyun 				   int level,
1534*4882a593Smuzhiyun 				   int optname,
1535*4882a593Smuzhiyun 				   sockptr_t optval,
1536*4882a593Smuzhiyun 				   unsigned int optlen)
1537*4882a593Smuzhiyun {
1538*4882a593Smuzhiyun 	int err;
1539*4882a593Smuzhiyun 	struct sock *sk;
1540*4882a593Smuzhiyun 	struct vsock_sock *vsk;
1541*4882a593Smuzhiyun 	const struct vsock_transport *transport;
1542*4882a593Smuzhiyun 	u64 val;
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 	if (level != AF_VSOCK)
1545*4882a593Smuzhiyun 		return -ENOPROTOOPT;
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun #define COPY_IN(_v)                                       \
1548*4882a593Smuzhiyun 	do {						  \
1549*4882a593Smuzhiyun 		if (optlen < sizeof(_v)) {		  \
1550*4882a593Smuzhiyun 			err = -EINVAL;			  \
1551*4882a593Smuzhiyun 			goto exit;			  \
1552*4882a593Smuzhiyun 		}					  \
1553*4882a593Smuzhiyun 		if (copy_from_sockptr(&_v, optval, sizeof(_v)) != 0) {	\
1554*4882a593Smuzhiyun 			err = -EFAULT;					\
1555*4882a593Smuzhiyun 			goto exit;					\
1556*4882a593Smuzhiyun 		}							\
1557*4882a593Smuzhiyun 	} while (0)
1558*4882a593Smuzhiyun 
1559*4882a593Smuzhiyun 	err = 0;
1560*4882a593Smuzhiyun 	sk = sock->sk;
1561*4882a593Smuzhiyun 	vsk = vsock_sk(sk);
1562*4882a593Smuzhiyun 
1563*4882a593Smuzhiyun 	lock_sock(sk);
1564*4882a593Smuzhiyun 
1565*4882a593Smuzhiyun 	transport = vsk->transport;
1566*4882a593Smuzhiyun 
1567*4882a593Smuzhiyun 	switch (optname) {
1568*4882a593Smuzhiyun 	case SO_VM_SOCKETS_BUFFER_SIZE:
1569*4882a593Smuzhiyun 		COPY_IN(val);
1570*4882a593Smuzhiyun 		vsock_update_buffer_size(vsk, transport, val);
1571*4882a593Smuzhiyun 		break;
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun 	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1574*4882a593Smuzhiyun 		COPY_IN(val);
1575*4882a593Smuzhiyun 		vsk->buffer_max_size = val;
1576*4882a593Smuzhiyun 		vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
1577*4882a593Smuzhiyun 		break;
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1580*4882a593Smuzhiyun 		COPY_IN(val);
1581*4882a593Smuzhiyun 		vsk->buffer_min_size = val;
1582*4882a593Smuzhiyun 		vsock_update_buffer_size(vsk, transport, vsk->buffer_size);
1583*4882a593Smuzhiyun 		break;
1584*4882a593Smuzhiyun 
1585*4882a593Smuzhiyun 	case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
1586*4882a593Smuzhiyun 		struct __kernel_old_timeval tv;
1587*4882a593Smuzhiyun 		COPY_IN(tv);
1588*4882a593Smuzhiyun 		if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
1589*4882a593Smuzhiyun 		    tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
1590*4882a593Smuzhiyun 			vsk->connect_timeout = tv.tv_sec * HZ +
1591*4882a593Smuzhiyun 			    DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
1592*4882a593Smuzhiyun 			if (vsk->connect_timeout == 0)
1593*4882a593Smuzhiyun 				vsk->connect_timeout =
1594*4882a593Smuzhiyun 				    VSOCK_DEFAULT_CONNECT_TIMEOUT;
1595*4882a593Smuzhiyun 
1596*4882a593Smuzhiyun 		} else {
1597*4882a593Smuzhiyun 			err = -ERANGE;
1598*4882a593Smuzhiyun 		}
1599*4882a593Smuzhiyun 		break;
1600*4882a593Smuzhiyun 	}
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun 	default:
1603*4882a593Smuzhiyun 		err = -ENOPROTOOPT;
1604*4882a593Smuzhiyun 		break;
1605*4882a593Smuzhiyun 	}
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun #undef COPY_IN
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun exit:
1610*4882a593Smuzhiyun 	release_sock(sk);
1611*4882a593Smuzhiyun 	return err;
1612*4882a593Smuzhiyun }
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun static int vsock_stream_getsockopt(struct socket *sock,
1615*4882a593Smuzhiyun 				   int level, int optname,
1616*4882a593Smuzhiyun 				   char __user *optval,
1617*4882a593Smuzhiyun 				   int __user *optlen)
1618*4882a593Smuzhiyun {
1619*4882a593Smuzhiyun 	int err;
1620*4882a593Smuzhiyun 	int len;
1621*4882a593Smuzhiyun 	struct sock *sk;
1622*4882a593Smuzhiyun 	struct vsock_sock *vsk;
1623*4882a593Smuzhiyun 	u64 val;
1624*4882a593Smuzhiyun 
1625*4882a593Smuzhiyun 	if (level != AF_VSOCK)
1626*4882a593Smuzhiyun 		return -ENOPROTOOPT;
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	err = get_user(len, optlen);
1629*4882a593Smuzhiyun 	if (err != 0)
1630*4882a593Smuzhiyun 		return err;
1631*4882a593Smuzhiyun 
1632*4882a593Smuzhiyun #define COPY_OUT(_v)                            \
1633*4882a593Smuzhiyun 	do {					\
1634*4882a593Smuzhiyun 		if (len < sizeof(_v))		\
1635*4882a593Smuzhiyun 			return -EINVAL;		\
1636*4882a593Smuzhiyun 						\
1637*4882a593Smuzhiyun 		len = sizeof(_v);		\
1638*4882a593Smuzhiyun 		if (copy_to_user(optval, &_v, len) != 0)	\
1639*4882a593Smuzhiyun 			return -EFAULT;				\
1640*4882a593Smuzhiyun 								\
1641*4882a593Smuzhiyun 	} while (0)
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	err = 0;
1644*4882a593Smuzhiyun 	sk = sock->sk;
1645*4882a593Smuzhiyun 	vsk = vsock_sk(sk);
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun 	switch (optname) {
1648*4882a593Smuzhiyun 	case SO_VM_SOCKETS_BUFFER_SIZE:
1649*4882a593Smuzhiyun 		val = vsk->buffer_size;
1650*4882a593Smuzhiyun 		COPY_OUT(val);
1651*4882a593Smuzhiyun 		break;
1652*4882a593Smuzhiyun 
1653*4882a593Smuzhiyun 	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
1654*4882a593Smuzhiyun 		val = vsk->buffer_max_size;
1655*4882a593Smuzhiyun 		COPY_OUT(val);
1656*4882a593Smuzhiyun 		break;
1657*4882a593Smuzhiyun 
1658*4882a593Smuzhiyun 	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
1659*4882a593Smuzhiyun 		val = vsk->buffer_min_size;
1660*4882a593Smuzhiyun 		COPY_OUT(val);
1661*4882a593Smuzhiyun 		break;
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 	case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
1664*4882a593Smuzhiyun 		struct __kernel_old_timeval tv;
1665*4882a593Smuzhiyun 		tv.tv_sec = vsk->connect_timeout / HZ;
1666*4882a593Smuzhiyun 		tv.tv_usec =
1667*4882a593Smuzhiyun 		    (vsk->connect_timeout -
1668*4882a593Smuzhiyun 		     tv.tv_sec * HZ) * (1000000 / HZ);
1669*4882a593Smuzhiyun 		COPY_OUT(tv);
1670*4882a593Smuzhiyun 		break;
1671*4882a593Smuzhiyun 	}
1672*4882a593Smuzhiyun 	default:
1673*4882a593Smuzhiyun 		return -ENOPROTOOPT;
1674*4882a593Smuzhiyun 	}
1675*4882a593Smuzhiyun 
1676*4882a593Smuzhiyun 	err = put_user(len, optlen);
1677*4882a593Smuzhiyun 	if (err != 0)
1678*4882a593Smuzhiyun 		return -EFAULT;
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun #undef COPY_OUT
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun 	return 0;
1683*4882a593Smuzhiyun }
1684*4882a593Smuzhiyun 
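/* Stream transmit path: data is enqueued through the transport in chunks,
 * sleeping (subject to the socket's send timeout) whenever
 * vsock_stream_has_space() reports no room.  On success the number of bytes
 * actually queued is returned, which may be less than the requested length.
 */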
1685*4882a593Smuzhiyun static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
1686*4882a593Smuzhiyun 				size_t len)
1687*4882a593Smuzhiyun {
1688*4882a593Smuzhiyun 	struct sock *sk;
1689*4882a593Smuzhiyun 	struct vsock_sock *vsk;
1690*4882a593Smuzhiyun 	const struct vsock_transport *transport;
1691*4882a593Smuzhiyun 	ssize_t total_written;
1692*4882a593Smuzhiyun 	long timeout;
1693*4882a593Smuzhiyun 	int err;
1694*4882a593Smuzhiyun 	struct vsock_transport_send_notify_data send_data;
1695*4882a593Smuzhiyun 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
1696*4882a593Smuzhiyun 
1697*4882a593Smuzhiyun 	sk = sock->sk;
1698*4882a593Smuzhiyun 	vsk = vsock_sk(sk);
1699*4882a593Smuzhiyun 	total_written = 0;
1700*4882a593Smuzhiyun 	err = 0;
1701*4882a593Smuzhiyun 
1702*4882a593Smuzhiyun 	if (msg->msg_flags & MSG_OOB)
1703*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1704*4882a593Smuzhiyun 
1705*4882a593Smuzhiyun 	lock_sock(sk);
1706*4882a593Smuzhiyun 
1707*4882a593Smuzhiyun 	transport = vsk->transport;
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	/* Callers should not provide a destination with stream sockets. */
1710*4882a593Smuzhiyun 	if (msg->msg_namelen) {
1711*4882a593Smuzhiyun 		err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EOPNOTSUPP;
1712*4882a593Smuzhiyun 		goto out;
1713*4882a593Smuzhiyun 	}
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 	/* Send data only if neither side has shut down in this direction. */
1716*4882a593Smuzhiyun 	if (sk->sk_shutdown & SEND_SHUTDOWN ||
1717*4882a593Smuzhiyun 	    vsk->peer_shutdown & RCV_SHUTDOWN) {
1718*4882a593Smuzhiyun 		err = -EPIPE;
1719*4882a593Smuzhiyun 		goto out;
1720*4882a593Smuzhiyun 	}
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	if (!transport || sk->sk_state != TCP_ESTABLISHED ||
1723*4882a593Smuzhiyun 	    !vsock_addr_bound(&vsk->local_addr)) {
1724*4882a593Smuzhiyun 		err = -ENOTCONN;
1725*4882a593Smuzhiyun 		goto out;
1726*4882a593Smuzhiyun 	}
1727*4882a593Smuzhiyun 
1728*4882a593Smuzhiyun 	if (!vsock_addr_bound(&vsk->remote_addr)) {
1729*4882a593Smuzhiyun 		err = -EDESTADDRREQ;
1730*4882a593Smuzhiyun 		goto out;
1731*4882a593Smuzhiyun 	}
1732*4882a593Smuzhiyun 
1733*4882a593Smuzhiyun 	/* Wait for room in the produce queue to enqueue our user's data. */
1734*4882a593Smuzhiyun 	timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 	err = transport->notify_send_init(vsk, &send_data);
1737*4882a593Smuzhiyun 	if (err < 0)
1738*4882a593Smuzhiyun 		goto out;
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 	while (total_written < len) {
1741*4882a593Smuzhiyun 		ssize_t written;
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun 		add_wait_queue(sk_sleep(sk), &wait);
1744*4882a593Smuzhiyun 		while (vsock_stream_has_space(vsk) == 0 &&
1745*4882a593Smuzhiyun 		       sk->sk_err == 0 &&
1746*4882a593Smuzhiyun 		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
1747*4882a593Smuzhiyun 		       !(vsk->peer_shutdown & RCV_SHUTDOWN)) {
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun 			/* Don't wait for non-blocking sockets. */
1750*4882a593Smuzhiyun 			if (timeout == 0) {
1751*4882a593Smuzhiyun 				err = -EAGAIN;
1752*4882a593Smuzhiyun 				remove_wait_queue(sk_sleep(sk), &wait);
1753*4882a593Smuzhiyun 				goto out_err;
1754*4882a593Smuzhiyun 			}
1755*4882a593Smuzhiyun 
1756*4882a593Smuzhiyun 			err = transport->notify_send_pre_block(vsk, &send_data);
1757*4882a593Smuzhiyun 			if (err < 0) {
1758*4882a593Smuzhiyun 				remove_wait_queue(sk_sleep(sk), &wait);
1759*4882a593Smuzhiyun 				goto out_err;
1760*4882a593Smuzhiyun 			}
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 			release_sock(sk);
1763*4882a593Smuzhiyun 			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
1764*4882a593Smuzhiyun 			lock_sock(sk);
1765*4882a593Smuzhiyun 			if (signal_pending(current)) {
1766*4882a593Smuzhiyun 				err = sock_intr_errno(timeout);
1767*4882a593Smuzhiyun 				remove_wait_queue(sk_sleep(sk), &wait);
1768*4882a593Smuzhiyun 				goto out_err;
1769*4882a593Smuzhiyun 			} else if (timeout == 0) {
1770*4882a593Smuzhiyun 				err = -EAGAIN;
1771*4882a593Smuzhiyun 				remove_wait_queue(sk_sleep(sk), &wait);
1772*4882a593Smuzhiyun 				goto out_err;
1773*4882a593Smuzhiyun 			}
1774*4882a593Smuzhiyun 		}
1775*4882a593Smuzhiyun 		remove_wait_queue(sk_sleep(sk), &wait);
1776*4882a593Smuzhiyun 
1777*4882a593Smuzhiyun 		/* These checks occur both as part of and after the loop
1778*4882a593Smuzhiyun 		 * conditional since we need to check before and after
1779*4882a593Smuzhiyun 		 * sleeping.
1780*4882a593Smuzhiyun 		 */
1781*4882a593Smuzhiyun 		if (sk->sk_err) {
1782*4882a593Smuzhiyun 			err = -sk->sk_err;
1783*4882a593Smuzhiyun 			goto out_err;
1784*4882a593Smuzhiyun 		} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
1785*4882a593Smuzhiyun 			   (vsk->peer_shutdown & RCV_SHUTDOWN)) {
1786*4882a593Smuzhiyun 			err = -EPIPE;
1787*4882a593Smuzhiyun 			goto out_err;
1788*4882a593Smuzhiyun 		}
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun 		err = transport->notify_send_pre_enqueue(vsk, &send_data);
1791*4882a593Smuzhiyun 		if (err < 0)
1792*4882a593Smuzhiyun 			goto out_err;
1793*4882a593Smuzhiyun 
1794*4882a593Smuzhiyun 		/* Note that enqueue will only write as many bytes as are free
1795*4882a593Smuzhiyun 		 * in the produce queue, so we don't need to ensure len is
1796*4882a593Smuzhiyun 		 * smaller than the queue size.  It is the caller's
1797*4882a593Smuzhiyun 		 * responsibility to check how many bytes we were able to send.
1798*4882a593Smuzhiyun 		 */
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun 		written = transport->stream_enqueue(
1801*4882a593Smuzhiyun 				vsk, msg,
1802*4882a593Smuzhiyun 				len - total_written);
1803*4882a593Smuzhiyun 		if (written < 0) {
1804*4882a593Smuzhiyun 			err = -ENOMEM;
1805*4882a593Smuzhiyun 			goto out_err;
1806*4882a593Smuzhiyun 		}
1807*4882a593Smuzhiyun 
1808*4882a593Smuzhiyun 		total_written += written;
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 		err = transport->notify_send_post_enqueue(
1811*4882a593Smuzhiyun 				vsk, written, &send_data);
1812*4882a593Smuzhiyun 		if (err < 0)
1813*4882a593Smuzhiyun 			goto out_err;
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 	}
1816*4882a593Smuzhiyun 
1817*4882a593Smuzhiyun out_err:
1818*4882a593Smuzhiyun 	if (total_written > 0)
1819*4882a593Smuzhiyun 		err = total_written;
1820*4882a593Smuzhiyun out:
1821*4882a593Smuzhiyun 	release_sock(sk);
1822*4882a593Smuzhiyun 	return err;
1823*4882a593Smuzhiyun }
1824*4882a593Smuzhiyun 
1825*4882a593Smuzhiyun 
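/* Stream receive path: the SO_RCVLOWAT/MSG_WAITALL low-water mark becomes
 * the dequeue target and must be below the transport's stream_rcvhiwat();
 * the call then sleeps (subject to the receive timeout) until that much data,
 * an error, or a shutdown is observed.
 */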
1826*4882a593Smuzhiyun static int
1827*4882a593Smuzhiyun vsock_stream_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
1828*4882a593Smuzhiyun 		     int flags)
1829*4882a593Smuzhiyun {
1830*4882a593Smuzhiyun 	struct sock *sk;
1831*4882a593Smuzhiyun 	struct vsock_sock *vsk;
1832*4882a593Smuzhiyun 	const struct vsock_transport *transport;
1833*4882a593Smuzhiyun 	int err;
1834*4882a593Smuzhiyun 	size_t target;
1835*4882a593Smuzhiyun 	ssize_t copied;
1836*4882a593Smuzhiyun 	long timeout;
1837*4882a593Smuzhiyun 	struct vsock_transport_recv_notify_data recv_data;
1838*4882a593Smuzhiyun 
1839*4882a593Smuzhiyun 	DEFINE_WAIT(wait);
1840*4882a593Smuzhiyun 
1841*4882a593Smuzhiyun 	sk = sock->sk;
1842*4882a593Smuzhiyun 	vsk = vsock_sk(sk);
1843*4882a593Smuzhiyun 	err = 0;
1844*4882a593Smuzhiyun 
1845*4882a593Smuzhiyun 	lock_sock(sk);
1846*4882a593Smuzhiyun 
1847*4882a593Smuzhiyun 	transport = vsk->transport;
1848*4882a593Smuzhiyun 
1849*4882a593Smuzhiyun 	if (!transport || sk->sk_state != TCP_ESTABLISHED) {
1850*4882a593Smuzhiyun 		/* Recvmsg is supposed to return 0 if a peer performs an
1851*4882a593Smuzhiyun 		 * orderly shutdown. Differentiate between that case and when a
1852*4882a593Smuzhiyun 		 * peer has not connected or a local shutdown occurred with the
1853*4882a593Smuzhiyun 		 * SOCK_DONE flag.
1854*4882a593Smuzhiyun 		 */
1855*4882a593Smuzhiyun 		if (sock_flag(sk, SOCK_DONE))
1856*4882a593Smuzhiyun 			err = 0;
1857*4882a593Smuzhiyun 		else
1858*4882a593Smuzhiyun 			err = -ENOTCONN;
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 		goto out;
1861*4882a593Smuzhiyun 	}
1862*4882a593Smuzhiyun 
1863*4882a593Smuzhiyun 	if (flags & MSG_OOB) {
1864*4882a593Smuzhiyun 		err = -EOPNOTSUPP;
1865*4882a593Smuzhiyun 		goto out;
1866*4882a593Smuzhiyun 	}
1867*4882a593Smuzhiyun 
1868*4882a593Smuzhiyun 	/* We don't check peer_shutdown flag here since peer may actually shut
1869*4882a593Smuzhiyun 	 * down, but there can be data in the queue that a local socket can
1870*4882a593Smuzhiyun 	 * receive.
1871*4882a593Smuzhiyun 	 */
1872*4882a593Smuzhiyun 	if (sk->sk_shutdown & RCV_SHUTDOWN) {
1873*4882a593Smuzhiyun 		err = 0;
1874*4882a593Smuzhiyun 		goto out;
1875*4882a593Smuzhiyun 	}
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun 	/* It is valid on Linux to pass in a zero-length receive buffer.  This
1878*4882a593Smuzhiyun 	 * is not an error.  We may as well bail out now.
1879*4882a593Smuzhiyun 	 */
1880*4882a593Smuzhiyun 	if (!len) {
1881*4882a593Smuzhiyun 		err = 0;
1882*4882a593Smuzhiyun 		goto out;
1883*4882a593Smuzhiyun 	}
1884*4882a593Smuzhiyun 
1885*4882a593Smuzhiyun 	/* We must not copy less than target bytes into the user's buffer
1886*4882a593Smuzhiyun 	 * before returning successfully, so we wait for the consume queue to
1887*4882a593Smuzhiyun 	 * have that much data to consume before dequeueing.  Note that this
1888*4882a593Smuzhiyun 	 * makes it impossible to handle cases where target is greater than the
1889*4882a593Smuzhiyun 	 * queue size.
1890*4882a593Smuzhiyun 	 */
1891*4882a593Smuzhiyun 	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1892*4882a593Smuzhiyun 	if (target >= transport->stream_rcvhiwat(vsk)) {
1893*4882a593Smuzhiyun 		err = -ENOMEM;
1894*4882a593Smuzhiyun 		goto out;
1895*4882a593Smuzhiyun 	}
1896*4882a593Smuzhiyun 	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
1897*4882a593Smuzhiyun 	copied = 0;
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun 	err = transport->notify_recv_init(vsk, target, &recv_data);
1900*4882a593Smuzhiyun 	if (err < 0)
1901*4882a593Smuzhiyun 		goto out;
1902*4882a593Smuzhiyun 
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 	while (1) {
1905*4882a593Smuzhiyun 		s64 ready;
1906*4882a593Smuzhiyun 
1907*4882a593Smuzhiyun 		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1908*4882a593Smuzhiyun 		ready = vsock_stream_has_data(vsk);
1909*4882a593Smuzhiyun 
1910*4882a593Smuzhiyun 		if (ready == 0) {
1911*4882a593Smuzhiyun 			if (sk->sk_err != 0 ||
1912*4882a593Smuzhiyun 			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
1913*4882a593Smuzhiyun 			    (vsk->peer_shutdown & SEND_SHUTDOWN)) {
1914*4882a593Smuzhiyun 				finish_wait(sk_sleep(sk), &wait);
1915*4882a593Smuzhiyun 				break;
1916*4882a593Smuzhiyun 			}
1917*4882a593Smuzhiyun 			/* Don't wait for non-blocking sockets. */
1918*4882a593Smuzhiyun 			if (timeout == 0) {
1919*4882a593Smuzhiyun 				err = -EAGAIN;
1920*4882a593Smuzhiyun 				finish_wait(sk_sleep(sk), &wait);
1921*4882a593Smuzhiyun 				break;
1922*4882a593Smuzhiyun 			}
1923*4882a593Smuzhiyun 
1924*4882a593Smuzhiyun 			err = transport->notify_recv_pre_block(
1925*4882a593Smuzhiyun 					vsk, target, &recv_data);
1926*4882a593Smuzhiyun 			if (err < 0) {
1927*4882a593Smuzhiyun 				finish_wait(sk_sleep(sk), &wait);
1928*4882a593Smuzhiyun 				break;
1929*4882a593Smuzhiyun 			}
1930*4882a593Smuzhiyun 			release_sock(sk);
1931*4882a593Smuzhiyun 			timeout = schedule_timeout(timeout);
1932*4882a593Smuzhiyun 			lock_sock(sk);
1933*4882a593Smuzhiyun 
1934*4882a593Smuzhiyun 			if (signal_pending(current)) {
1935*4882a593Smuzhiyun 				err = sock_intr_errno(timeout);
1936*4882a593Smuzhiyun 				finish_wait(sk_sleep(sk), &wait);
1937*4882a593Smuzhiyun 				break;
1938*4882a593Smuzhiyun 			} else if (timeout == 0) {
1939*4882a593Smuzhiyun 				err = -EAGAIN;
1940*4882a593Smuzhiyun 				finish_wait(sk_sleep(sk), &wait);
1941*4882a593Smuzhiyun 				break;
1942*4882a593Smuzhiyun 			}
1943*4882a593Smuzhiyun 		} else {
1944*4882a593Smuzhiyun 			ssize_t read;
1945*4882a593Smuzhiyun 
1946*4882a593Smuzhiyun 			finish_wait(sk_sleep(sk), &wait);
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun 			if (ready < 0) {
1949*4882a593Smuzhiyun 				/* Invalid queue pair content. XXX This should
1950*4882a593Smuzhiyun 				* be changed to a connection reset in a later
1951*4882a593Smuzhiyun 				 * be changed to a connection reset in a later
1952*4882a593Smuzhiyun 				 * change.
1953*4882a593Smuzhiyun 				 */
1954*4882a593Smuzhiyun 				err = -ENOMEM;
1955*4882a593Smuzhiyun 				goto out;
1956*4882a593Smuzhiyun 			}
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 			err = transport->notify_recv_pre_dequeue(
1959*4882a593Smuzhiyun 					vsk, target, &recv_data);
1960*4882a593Smuzhiyun 			if (err < 0)
1961*4882a593Smuzhiyun 				break;
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 			read = transport->stream_dequeue(
1964*4882a593Smuzhiyun 					vsk, msg,
1965*4882a593Smuzhiyun 					len - copied, flags);
1966*4882a593Smuzhiyun 			if (read < 0) {
1967*4882a593Smuzhiyun 				err = -ENOMEM;
1968*4882a593Smuzhiyun 				break;
1969*4882a593Smuzhiyun 			}
1970*4882a593Smuzhiyun 
1971*4882a593Smuzhiyun 			copied += read;
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun 			err = transport->notify_recv_post_dequeue(
1974*4882a593Smuzhiyun 					vsk, target, read,
1975*4882a593Smuzhiyun 					!(flags & MSG_PEEK), &recv_data);
1976*4882a593Smuzhiyun 			if (err < 0)
1977*4882a593Smuzhiyun 				goto out;
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun 			if (read >= target || flags & MSG_PEEK)
1980*4882a593Smuzhiyun 				break;
1981*4882a593Smuzhiyun 
1982*4882a593Smuzhiyun 			target -= read;
1983*4882a593Smuzhiyun 		}
1984*4882a593Smuzhiyun 	}
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun 	if (sk->sk_err)
1987*4882a593Smuzhiyun 		err = -sk->sk_err;
1988*4882a593Smuzhiyun 	else if (sk->sk_shutdown & RCV_SHUTDOWN)
1989*4882a593Smuzhiyun 		err = 0;
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 	if (copied > 0)
1992*4882a593Smuzhiyun 		err = copied;
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun out:
1995*4882a593Smuzhiyun 	release_sock(sk);
1996*4882a593Smuzhiyun 	return err;
1997*4882a593Smuzhiyun }
1998*4882a593Smuzhiyun 
1999*4882a593Smuzhiyun static const struct proto_ops vsock_stream_ops = {
2000*4882a593Smuzhiyun 	.family = PF_VSOCK,
2001*4882a593Smuzhiyun 	.owner = THIS_MODULE,
2002*4882a593Smuzhiyun 	.release = vsock_release,
2003*4882a593Smuzhiyun 	.bind = vsock_bind,
2004*4882a593Smuzhiyun 	.connect = vsock_stream_connect,
2005*4882a593Smuzhiyun 	.socketpair = sock_no_socketpair,
2006*4882a593Smuzhiyun 	.accept = vsock_accept,
2007*4882a593Smuzhiyun 	.getname = vsock_getname,
2008*4882a593Smuzhiyun 	.poll = vsock_poll,
2009*4882a593Smuzhiyun 	.ioctl = sock_no_ioctl,
2010*4882a593Smuzhiyun 	.listen = vsock_listen,
2011*4882a593Smuzhiyun 	.shutdown = vsock_shutdown,
2012*4882a593Smuzhiyun 	.setsockopt = vsock_stream_setsockopt,
2013*4882a593Smuzhiyun 	.getsockopt = vsock_stream_getsockopt,
2014*4882a593Smuzhiyun 	.sendmsg = vsock_stream_sendmsg,
2015*4882a593Smuzhiyun 	.recvmsg = vsock_stream_recvmsg,
2016*4882a593Smuzhiyun 	.mmap = sock_no_mmap,
2017*4882a593Smuzhiyun 	.sendpage = sock_no_sendpage,
2018*4882a593Smuzhiyun };
2019*4882a593Smuzhiyun 
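/* socket(2) entry point for AF_VSOCK.  Datagram sockets are bound to the
 * (single) datagram transport right away, while stream sockets defer
 * transport assignment until the remote CID is known at connect time or
 * when an incoming connection is handled.
 */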
2020*4882a593Smuzhiyun static int vsock_create(struct net *net, struct socket *sock,
2021*4882a593Smuzhiyun 			int protocol, int kern)
2022*4882a593Smuzhiyun {
2023*4882a593Smuzhiyun 	struct vsock_sock *vsk;
2024*4882a593Smuzhiyun 	struct sock *sk;
2025*4882a593Smuzhiyun 	int ret;
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 	if (!sock)
2028*4882a593Smuzhiyun 		return -EINVAL;
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun 	if (protocol && protocol != PF_VSOCK)
2031*4882a593Smuzhiyun 		return -EPROTONOSUPPORT;
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun 	switch (sock->type) {
2034*4882a593Smuzhiyun 	case SOCK_DGRAM:
2035*4882a593Smuzhiyun 		sock->ops = &vsock_dgram_ops;
2036*4882a593Smuzhiyun 		break;
2037*4882a593Smuzhiyun 	case SOCK_STREAM:
2038*4882a593Smuzhiyun 		sock->ops = &vsock_stream_ops;
2039*4882a593Smuzhiyun 		break;
2040*4882a593Smuzhiyun 	default:
2041*4882a593Smuzhiyun 		return -ESOCKTNOSUPPORT;
2042*4882a593Smuzhiyun 	}
2043*4882a593Smuzhiyun 
2044*4882a593Smuzhiyun 	sock->state = SS_UNCONNECTED;
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun 	sk = __vsock_create(net, sock, NULL, GFP_KERNEL, 0, kern);
2047*4882a593Smuzhiyun 	if (!sk)
2048*4882a593Smuzhiyun 		return -ENOMEM;
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun 	vsk = vsock_sk(sk);
2051*4882a593Smuzhiyun 
2052*4882a593Smuzhiyun 	if (sock->type == SOCK_DGRAM) {
2053*4882a593Smuzhiyun 		ret = vsock_assign_transport(vsk, NULL);
2054*4882a593Smuzhiyun 		if (ret < 0) {
2055*4882a593Smuzhiyun 			sock_put(sk);
2056*4882a593Smuzhiyun 			return ret;
2057*4882a593Smuzhiyun 		}
2058*4882a593Smuzhiyun 	}
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun 	vsock_insert_unbound(vsk);
2061*4882a593Smuzhiyun 
2062*4882a593Smuzhiyun 	return 0;
2063*4882a593Smuzhiyun }
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun static const struct net_proto_family vsock_family_ops = {
2066*4882a593Smuzhiyun 	.family = AF_VSOCK,
2067*4882a593Smuzhiyun 	.create = vsock_create,
2068*4882a593Smuzhiyun 	.owner = THIS_MODULE,
2069*4882a593Smuzhiyun };
2070*4882a593Smuzhiyun 
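/* Backend for the /dev/vsock misc device ioctls.  A minimal userspace sketch
 * for querying the local CID (illustrative only):
 *
 *	unsigned int cid;
 *	int fd = open("/dev/vsock", O_RDONLY);
 *	ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
 */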
2071*4882a593Smuzhiyun static long vsock_dev_do_ioctl(struct file *filp,
2072*4882a593Smuzhiyun 			       unsigned int cmd, void __user *ptr)
2073*4882a593Smuzhiyun {
2074*4882a593Smuzhiyun 	u32 __user *p = ptr;
2075*4882a593Smuzhiyun 	u32 cid = VMADDR_CID_ANY;
2076*4882a593Smuzhiyun 	int retval = 0;
2077*4882a593Smuzhiyun 
2078*4882a593Smuzhiyun 	switch (cmd) {
2079*4882a593Smuzhiyun 	case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
2080*4882a593Smuzhiyun 		/* To be compatible with the VMCI behavior, we prioritize the
2081*4882a593Smuzhiyun 		 * guest CID instead of well-known host CID (VMADDR_CID_HOST).
2082*4882a593Smuzhiyun 		 */
2083*4882a593Smuzhiyun 		if (transport_g2h)
2084*4882a593Smuzhiyun 			cid = transport_g2h->get_local_cid();
2085*4882a593Smuzhiyun 		else if (transport_h2g)
2086*4882a593Smuzhiyun 			cid = transport_h2g->get_local_cid();
2087*4882a593Smuzhiyun 
2088*4882a593Smuzhiyun 		if (put_user(cid, p) != 0)
2089*4882a593Smuzhiyun 			retval = -EFAULT;
2090*4882a593Smuzhiyun 		break;
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun 	default:
2093*4882a593Smuzhiyun 		pr_err("Unknown ioctl %d\n", cmd);
2094*4882a593Smuzhiyun 		retval = -EINVAL;
2095*4882a593Smuzhiyun 	}
2096*4882a593Smuzhiyun 
2097*4882a593Smuzhiyun 	return retval;
2098*4882a593Smuzhiyun }
2099*4882a593Smuzhiyun 
2100*4882a593Smuzhiyun static long vsock_dev_ioctl(struct file *filp,
2101*4882a593Smuzhiyun 			    unsigned int cmd, unsigned long arg)
2102*4882a593Smuzhiyun {
2103*4882a593Smuzhiyun 	return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
2104*4882a593Smuzhiyun }
2105*4882a593Smuzhiyun 
2106*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
2107*4882a593Smuzhiyun static long vsock_dev_compat_ioctl(struct file *filp,
2108*4882a593Smuzhiyun 				   unsigned int cmd, unsigned long arg)
2109*4882a593Smuzhiyun {
2110*4882a593Smuzhiyun 	return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
2111*4882a593Smuzhiyun }
2112*4882a593Smuzhiyun #endif
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun static const struct file_operations vsock_device_ops = {
2115*4882a593Smuzhiyun 	.owner		= THIS_MODULE,
2116*4882a593Smuzhiyun 	.unlocked_ioctl	= vsock_dev_ioctl,
2117*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
2118*4882a593Smuzhiyun 	.compat_ioctl	= vsock_dev_compat_ioctl,
2119*4882a593Smuzhiyun #endif
2120*4882a593Smuzhiyun 	.open		= nonseekable_open,
2121*4882a593Smuzhiyun };
2122*4882a593Smuzhiyun 
2123*4882a593Smuzhiyun static struct miscdevice vsock_device = {
2124*4882a593Smuzhiyun 	.name		= "vsock",
2125*4882a593Smuzhiyun 	.fops		= &vsock_device_ops,
2126*4882a593Smuzhiyun };
2127*4882a593Smuzhiyun 
2128*4882a593Smuzhiyun static int __init vsock_init(void)
2129*4882a593Smuzhiyun {
2130*4882a593Smuzhiyun 	int err = 0;
2131*4882a593Smuzhiyun 
2132*4882a593Smuzhiyun 	vsock_init_tables();
2133*4882a593Smuzhiyun 
2134*4882a593Smuzhiyun 	vsock_proto.owner = THIS_MODULE;
2135*4882a593Smuzhiyun 	vsock_device.minor = MISC_DYNAMIC_MINOR;
2136*4882a593Smuzhiyun 	err = misc_register(&vsock_device);
2137*4882a593Smuzhiyun 	if (err) {
2138*4882a593Smuzhiyun 		pr_err("Failed to register misc device\n");
2139*4882a593Smuzhiyun 		goto err_reset_transport;
2140*4882a593Smuzhiyun 	}
2141*4882a593Smuzhiyun 
2142*4882a593Smuzhiyun 	err = proto_register(&vsock_proto, 1);	/* we want our slab */
2143*4882a593Smuzhiyun 	if (err) {
2144*4882a593Smuzhiyun 		pr_err("Cannot register vsock protocol\n");
2145*4882a593Smuzhiyun 		goto err_deregister_misc;
2146*4882a593Smuzhiyun 	}
2147*4882a593Smuzhiyun 
2148*4882a593Smuzhiyun 	err = sock_register(&vsock_family_ops);
2149*4882a593Smuzhiyun 	if (err) {
2150*4882a593Smuzhiyun 		pr_err("could not register af_vsock (%d) address family: %d\n",
2151*4882a593Smuzhiyun 		       AF_VSOCK, err);
2152*4882a593Smuzhiyun 		goto err_unregister_proto;
2153*4882a593Smuzhiyun 	}
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 	return 0;
2156*4882a593Smuzhiyun 
2157*4882a593Smuzhiyun err_unregister_proto:
2158*4882a593Smuzhiyun 	proto_unregister(&vsock_proto);
2159*4882a593Smuzhiyun err_deregister_misc:
2160*4882a593Smuzhiyun 	misc_deregister(&vsock_device);
2161*4882a593Smuzhiyun err_reset_transport:
2162*4882a593Smuzhiyun 	return err;
2163*4882a593Smuzhiyun }
2164*4882a593Smuzhiyun 
2165*4882a593Smuzhiyun static void __exit vsock_exit(void)
2166*4882a593Smuzhiyun {
2167*4882a593Smuzhiyun 	misc_deregister(&vsock_device);
2168*4882a593Smuzhiyun 	sock_unregister(AF_VSOCK);
2169*4882a593Smuzhiyun 	proto_unregister(&vsock_proto);
2170*4882a593Smuzhiyun }
2171*4882a593Smuzhiyun 
2172*4882a593Smuzhiyun const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk)
2173*4882a593Smuzhiyun {
2174*4882a593Smuzhiyun 	return vsk->transport;
2175*4882a593Smuzhiyun }
2176*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vsock_core_get_transport);
2177*4882a593Smuzhiyun 
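/* Transport registration.  A transport may claim one or more roles
 * (VSOCK_TRANSPORT_F_H2G, _G2H, _DGRAM, _LOCAL); each role can be held by at
 * most one transport at a time, so claiming an occupied role fails with
 * -EBUSY and leaves no partial registration behind.
 */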
2178*4882a593Smuzhiyun int vsock_core_register(const struct vsock_transport *t, int features)
2179*4882a593Smuzhiyun {
2180*4882a593Smuzhiyun 	const struct vsock_transport *t_h2g, *t_g2h, *t_dgram, *t_local;
2181*4882a593Smuzhiyun 	int err = mutex_lock_interruptible(&vsock_register_mutex);
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 	if (err)
2184*4882a593Smuzhiyun 		return err;
2185*4882a593Smuzhiyun 
2186*4882a593Smuzhiyun 	t_h2g = transport_h2g;
2187*4882a593Smuzhiyun 	t_g2h = transport_g2h;
2188*4882a593Smuzhiyun 	t_dgram = transport_dgram;
2189*4882a593Smuzhiyun 	t_local = transport_local;
2190*4882a593Smuzhiyun 
2191*4882a593Smuzhiyun 	if (features & VSOCK_TRANSPORT_F_H2G) {
2192*4882a593Smuzhiyun 		if (t_h2g) {
2193*4882a593Smuzhiyun 			err = -EBUSY;
2194*4882a593Smuzhiyun 			goto err_busy;
2195*4882a593Smuzhiyun 		}
2196*4882a593Smuzhiyun 		t_h2g = t;
2197*4882a593Smuzhiyun 	}
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun 	if (features & VSOCK_TRANSPORT_F_G2H) {
2200*4882a593Smuzhiyun 		if (t_g2h) {
2201*4882a593Smuzhiyun 			err = -EBUSY;
2202*4882a593Smuzhiyun 			goto err_busy;
2203*4882a593Smuzhiyun 		}
2204*4882a593Smuzhiyun 		t_g2h = t;
2205*4882a593Smuzhiyun 	}
2206*4882a593Smuzhiyun 
2207*4882a593Smuzhiyun 	if (features & VSOCK_TRANSPORT_F_DGRAM) {
2208*4882a593Smuzhiyun 		if (t_dgram) {
2209*4882a593Smuzhiyun 			err = -EBUSY;
2210*4882a593Smuzhiyun 			goto err_busy;
2211*4882a593Smuzhiyun 		}
2212*4882a593Smuzhiyun 		t_dgram = t;
2213*4882a593Smuzhiyun 	}
2214*4882a593Smuzhiyun 
2215*4882a593Smuzhiyun 	if (features & VSOCK_TRANSPORT_F_LOCAL) {
2216*4882a593Smuzhiyun 		if (t_local) {
2217*4882a593Smuzhiyun 			err = -EBUSY;
2218*4882a593Smuzhiyun 			goto err_busy;
2219*4882a593Smuzhiyun 		}
2220*4882a593Smuzhiyun 		t_local = t;
2221*4882a593Smuzhiyun 	}
2222*4882a593Smuzhiyun 
2223*4882a593Smuzhiyun 	transport_h2g = t_h2g;
2224*4882a593Smuzhiyun 	transport_g2h = t_g2h;
2225*4882a593Smuzhiyun 	transport_dgram = t_dgram;
2226*4882a593Smuzhiyun 	transport_local = t_local;
2227*4882a593Smuzhiyun 
2228*4882a593Smuzhiyun err_busy:
2229*4882a593Smuzhiyun 	mutex_unlock(&vsock_register_mutex);
2230*4882a593Smuzhiyun 	return err;
2231*4882a593Smuzhiyun }
2232*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vsock_core_register);
2233*4882a593Smuzhiyun 
2234*4882a593Smuzhiyun void vsock_core_unregister(const struct vsock_transport *t)
2235*4882a593Smuzhiyun {
2236*4882a593Smuzhiyun 	mutex_lock(&vsock_register_mutex);
2237*4882a593Smuzhiyun 
2238*4882a593Smuzhiyun 	if (transport_h2g == t)
2239*4882a593Smuzhiyun 		transport_h2g = NULL;
2240*4882a593Smuzhiyun 
2241*4882a593Smuzhiyun 	if (transport_g2h == t)
2242*4882a593Smuzhiyun 		transport_g2h = NULL;
2243*4882a593Smuzhiyun 
2244*4882a593Smuzhiyun 	if (transport_dgram == t)
2245*4882a593Smuzhiyun 		transport_dgram = NULL;
2246*4882a593Smuzhiyun 
2247*4882a593Smuzhiyun 	if (transport_local == t)
2248*4882a593Smuzhiyun 		transport_local = NULL;
2249*4882a593Smuzhiyun 
2250*4882a593Smuzhiyun 	mutex_unlock(&vsock_register_mutex);
2251*4882a593Smuzhiyun }
2252*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(vsock_core_unregister);
2253*4882a593Smuzhiyun 
2254*4882a593Smuzhiyun module_init(vsock_init);
2255*4882a593Smuzhiyun module_exit(vsock_exit);
2256*4882a593Smuzhiyun 
2257*4882a593Smuzhiyun MODULE_AUTHOR("VMware, Inc.");
2258*4882a593Smuzhiyun MODULE_DESCRIPTION("VMware Virtual Socket Family");
2259*4882a593Smuzhiyun MODULE_VERSION("1.0.2.0-k");
2260*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
2261