// SPDX-License-Identifier: GPL-2.0-only
/*
 * vsock sock_diag(7) module
 *
 * Copyright (C) 2017 Red Hat, Inc.
 * Author: Stefan Hajnoczi <stefanha@redhat.com>
 */

9*4882a593Smuzhiyun #include <linux/module.h>
10*4882a593Smuzhiyun #include <linux/sock_diag.h>
11*4882a593Smuzhiyun #include <linux/vm_sockets_diag.h>
12*4882a593Smuzhiyun #include <net/af_vsock.h>
13*4882a593Smuzhiyun
/* Append one vsock_diag_msg describing @sk to the netlink dump buffer @skb.
 *
 * Returns 0 on success or -EMSGSIZE when @skb has no room left, which
 * tells the dump loop to stop and resume in a later invocation.
 */
static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
			u32 portid, u32 seq, u32 flags)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	struct vsock_diag_msg *msg;
	struct nlmsghdr *hdr;

	hdr = nlmsg_put(skb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg),
			flags);
	if (!hdr)
		return -EMSGSIZE;

	msg = nlmsg_data(hdr);
	msg->vdiag_family = AF_VSOCK;

	/* Lock order dictates that sk_lock is acquired before
	 * vsock_table_lock, so we cannot lock here.  Simply don't take
	 * sk_lock; sk is guaranteed to stay alive since vsock_table_lock is
	 * held by the caller.
	 */
	msg->vdiag_type = sk->sk_type;
	msg->vdiag_state = sk->sk_state;
	msg->vdiag_shutdown = sk->sk_shutdown;
	msg->vdiag_src_cid = vsk->local_addr.svm_cid;
	msg->vdiag_src_port = vsk->local_addr.svm_port;
	msg->vdiag_dst_cid = vsk->remote_addr.svm_cid;
	msg->vdiag_dst_port = vsk->remote_addr.svm_port;
	msg->vdiag_ino = sock_i_ino(sk);

	sock_diag_save_cookie(sk, msg->vdiag_cookie);

	return 0;
}
47*4882a593Smuzhiyun
/* Netlink dump callback: walk all vsock sockets and emit one record per
 * socket matching the requested state mask.  Called repeatedly until the
 * dump is complete; iteration position is persisted in cb->args[] between
 * calls so a full skb does not lose progress.
 */
static int vsock_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct vsock_diag_req *req;
	struct vsock_sock *vsk;
	unsigned int bucket;
	unsigned int last_i;
	unsigned int table;
	struct net *net;
	unsigned int i;

	req = nlmsg_data(cb->nlh);
	net = sock_net(skb->sk);

	/* State saved between calls:
	 * args[0] = which table (0 = bind, 1 = connected)
	 * args[1] = hash bucket within that table
	 * args[2] = entry index within that bucket already emitted
	 */
	table = cb->args[0];
	bucket = cb->args[1];
	i = last_i = cb->args[2];

	/* TODO VMCI pending sockets? */

	/* Both tables are protected by this lock; it also keeps every sk we
	 * touch alive for the duration of the walk (see sk_diag_fill()).
	 */
	spin_lock_bh(&vsock_table_lock);

	/* Bind table (locally created sockets) */
	if (table == 0) {
		while (bucket < ARRAY_SIZE(vsock_bind_table)) {
			struct list_head *head = &vsock_bind_table[bucket];

			i = 0;
			list_for_each_entry(vsk, head, bound_table) {
				struct sock *sk = sk_vsock(vsk);

				/* Only report sockets in the requester's
				 * network namespace.
				 */
				if (!net_eq(sock_net(sk), net))
					continue;
				/* Fast-forward past entries emitted in a
				 * previous invocation of this dump.
				 */
				if (i < last_i)
					goto next_bind;
				/* Filter by the caller's socket-state mask. */
				if (!(req->vdiag_states & (1 << sk->sk_state)))
					goto next_bind;
				/* skb full: stop and resume here next call. */
				if (sk_diag_fill(sk, skb,
						 NETLINK_CB(cb->skb).portid,
						 cb->nlh->nlmsg_seq,
						 NLM_F_MULTI) < 0)
					goto done;
next_bind:
				i++;
			}
			/* Bucket fully processed; nothing to skip in the
			 * next one.
			 */
			last_i = 0;
			bucket++;
		}

		/* Move on to the connected table from its first bucket. */
		table++;
		bucket = 0;
	}

	/* Connected table (accepted connections) */
	while (bucket < ARRAY_SIZE(vsock_connected_table)) {
		struct list_head *head = &vsock_connected_table[bucket];

		i = 0;
		list_for_each_entry(vsk, head, connected_table) {
			struct sock *sk = sk_vsock(vsk);

			/* Skip sockets we've already seen above (a socket
			 * can be in both the bind and connected tables).
			 */
			if (__vsock_in_bound_table(vsk))
				continue;

			if (!net_eq(sock_net(sk), net))
				continue;
			if (i < last_i)
				goto next_connected;
			if (!(req->vdiag_states & (1 << sk->sk_state)))
				goto next_connected;
			if (sk_diag_fill(sk, skb,
					 NETLINK_CB(cb->skb).portid,
					 cb->nlh->nlmsg_seq,
					 NLM_F_MULTI) < 0)
				goto done;
next_connected:
			i++;
		}
		last_i = 0;
		bucket++;
	}

done:
	spin_unlock_bh(&vsock_table_lock);

	/* Save position for the next invocation (or final state on
	 * completion).
	 */
	cb->args[0] = table;
	cb->args[1] = bucket;
	cb->args[2] = i;

	/* Non-zero length tells netlink there is (possibly) more to dump. */
	return skb->len;
}
140*4882a593Smuzhiyun
vsock_diag_handler_dump(struct sk_buff * skb,struct nlmsghdr * h)141*4882a593Smuzhiyun static int vsock_diag_handler_dump(struct sk_buff *skb, struct nlmsghdr *h)
142*4882a593Smuzhiyun {
143*4882a593Smuzhiyun int hdrlen = sizeof(struct vsock_diag_req);
144*4882a593Smuzhiyun struct net *net = sock_net(skb->sk);
145*4882a593Smuzhiyun
146*4882a593Smuzhiyun if (nlmsg_len(h) < hdrlen)
147*4882a593Smuzhiyun return -EINVAL;
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun if (h->nlmsg_flags & NLM_F_DUMP) {
150*4882a593Smuzhiyun struct netlink_dump_control c = {
151*4882a593Smuzhiyun .dump = vsock_diag_dump,
152*4882a593Smuzhiyun };
153*4882a593Smuzhiyun return netlink_dump_start(net->diag_nlsk, skb, h, &c);
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun return -EOPNOTSUPP;
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun
/* Routes NETLINK_SOCK_DIAG requests for the AF_VSOCK family to this
 * module's dump handler.
 */
static const struct sock_diag_handler vsock_diag_handler = {
	.family = AF_VSOCK,
	.dump = vsock_diag_handler_dump,
};
163*4882a593Smuzhiyun
/* Module init: register the AF_VSOCK sock_diag handler. */
static int __init vsock_diag_init(void)
{
	return sock_diag_register(&vsock_diag_handler);
}
168*4882a593Smuzhiyun
/* Module exit: unregister the AF_VSOCK sock_diag handler. */
static void __exit vsock_diag_exit(void)
{
	sock_diag_unregister(&vsock_diag_handler);
}
173*4882a593Smuzhiyun
module_init(vsock_diag_init);
module_exit(vsock_diag_exit);
MODULE_LICENSE("GPL");
/* Autoload this module when a NETLINK_SOCK_DIAG request arrives for the
 * vsock family.  The literal 40 is AF_VSOCK; presumably a literal is
 * required because the macro stringifies its argument for the alias —
 * TODO confirm.
 */
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG,
			       40 /* AF_VSOCK */);
179