// SPDX-License-Identifier: GPL-2.0
/* XDP sockets monitoring support
 *
 * Copyright(c) 2019 Intel Corporation.
 *
 * Author: Björn Töpel <bjorn.topel@intel.com>
 */

#include <linux/module.h>
#include <net/xdp_sock.h>
#include <linux/xdp_diag.h>
#include <linux/sock_diag.h>

#include "xsk_queue.h"
#include "xsk.h"

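/* Emit the XDP_DIAG_INFO attribute: bound interface index and queue id. */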
static int xsk_diag_put_info(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
	struct xdp_diag_info di = {};

	di.ifindex = xs->dev ? xs->dev->ifindex : 0;
	di.queue_id = xs->queue_id;
	return nla_put(nlskb, XDP_DIAG_INFO, sizeof(di), &di);
}

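/* Emit one ring-size attribute; nl_type selects which ring is being described. */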
static int xsk_diag_put_ring(const struct xsk_queue *queue, int nl_type,
			     struct sk_buff *nlskb)
{
	struct xdp_diag_ring dr = {};

	dr.entries = queue->nentries;
	return nla_put(nlskb, nl_type, sizeof(dr), &dr);
}

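/* Emit ring configuration for the RX and TX rings, when present. */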
static int xsk_diag_put_rings_cfg(const struct xdp_sock *xs,
				  struct sk_buff *nlskb)
{
	int err = 0;

	if (xs->rx)
		err = xsk_diag_put_ring(xs->rx, XDP_DIAG_RX_RING, nlskb);
	if (!err && xs->tx)
		err = xsk_diag_put_ring(xs->tx, XDP_DIAG_TX_RING, nlskb);
	return err;
}

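/* Emit UMEM details plus the fill and completion ring sizes, if the socket
 * has a UMEM attached.
 */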
static int xsk_diag_put_umem(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
	struct xsk_buff_pool *pool = xs->pool;
	struct xdp_umem *umem = xs->umem;
	struct xdp_diag_umem du = {};
	int err;

	if (!umem)
		return 0;

	du.id = umem->id;
	du.size = umem->size;
	du.num_pages = umem->npgs;
	du.chunk_size = umem->chunk_size;
	du.headroom = umem->headroom;
	du.ifindex = (pool && pool->netdev) ? pool->netdev->ifindex : 0;
	du.queue_id = pool ? pool->queue_id : 0;
	du.flags = 0;
	if (umem->zc)
		du.flags |= XDP_DU_F_ZEROCOPY;
	du.refs = refcount_read(&umem->users);

	err = nla_put(nlskb, XDP_DIAG_UMEM, sizeof(du), &du);
	if (!err && pool && pool->fq)
		err = xsk_diag_put_ring(pool->fq,
					XDP_DIAG_UMEM_FILL_RING, nlskb);
	if (!err && pool && pool->cq)
		err = xsk_diag_put_ring(pool->cq,
					XDP_DIAG_UMEM_COMPLETION_RING, nlskb);
	return err;
}

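/* Emit the XDP_DIAG_STATS attribute: drop, invalid-descriptor and
 * queue-full/empty counters.
 */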
static int xsk_diag_put_stats(const struct xdp_sock *xs, struct sk_buff *nlskb)
{
	struct xdp_diag_stats du = {};

	du.n_rx_dropped = xs->rx_dropped;
	du.n_rx_invalid = xskq_nb_invalid_descs(xs->rx);
	du.n_rx_full = xs->rx_queue_full;
	du.n_fill_ring_empty = xs->pool ? xskq_nb_queue_empty_descs(xs->pool->fq) : 0;
	du.n_tx_invalid = xskq_nb_invalid_descs(xs->tx);
	du.n_tx_ring_empty = xskq_nb_queue_empty_descs(xs->tx);
	return nla_put(nlskb, XDP_DIAG_STATS, sizeof(du), &du);
}

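/* Fill one netlink message for a single XDP socket, emitting only the
 * attribute groups requested via req->xdiag_show.
 */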
static int xsk_diag_fill(struct sock *sk, struct sk_buff *nlskb,
			 struct xdp_diag_req *req,
			 struct user_namespace *user_ns,
			 u32 portid, u32 seq, u32 flags, int sk_ino)
{
	struct xdp_sock *xs = xdp_sk(sk);
	struct xdp_diag_msg *msg;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(nlskb, portid, seq, SOCK_DIAG_BY_FAMILY, sizeof(*msg),
			flags);
	if (!nlh)
		return -EMSGSIZE;

	msg = nlmsg_data(nlh);
	memset(msg, 0, sizeof(*msg));
	msg->xdiag_family = AF_XDP;
	msg->xdiag_type = sk->sk_type;
	msg->xdiag_ino = sk_ino;
	sock_diag_save_cookie(sk, msg->xdiag_cookie);

	mutex_lock(&xs->mutex);
	if ((req->xdiag_show & XDP_SHOW_INFO) && xsk_diag_put_info(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_INFO) &&
	    nla_put_u32(nlskb, XDP_DIAG_UID,
			from_kuid_munged(user_ns, sock_i_uid(sk))))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_RING_CFG) &&
	    xsk_diag_put_rings_cfg(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_UMEM) &&
	    xsk_diag_put_umem(xs, nlskb))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_MEMINFO) &&
	    sock_diag_put_meminfo(sk, nlskb, XDP_DIAG_MEMINFO))
		goto out_nlmsg_trim;

	if ((req->xdiag_show & XDP_SHOW_STATS) &&
	    xsk_diag_put_stats(xs, nlskb))
		goto out_nlmsg_trim;

	mutex_unlock(&xs->mutex);
	nlmsg_end(nlskb, nlh);
	return 0;

out_nlmsg_trim:
	mutex_unlock(&xs->mutex);
	nlmsg_cancel(nlskb, nlh);
	return -EMSGSIZE;
}

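/* Dump callback: walk all XDP sockets in the netns, resuming from the
 * offset saved in cb->args[0] by an earlier partial dump.
 */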
static int xsk_diag_dump(struct sk_buff *nlskb, struct netlink_callback *cb)
{
	struct xdp_diag_req *req = nlmsg_data(cb->nlh);
	struct net *net = sock_net(nlskb->sk);
	int num = 0, s_num = cb->args[0];
	struct sock *sk;

	mutex_lock(&net->xdp.lock);

	sk_for_each(sk, &net->xdp.list) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (num++ < s_num)
			continue;

		if (xsk_diag_fill(sk, nlskb, req,
				  sk_user_ns(NETLINK_CB(cb->skb).sk),
				  NETLINK_CB(cb->skb).portid,
				  cb->nlh->nlmsg_seq, NLM_F_MULTI,
				  sock_i_ino(sk)) < 0) {
			num--;
			break;
		}
	}

	mutex_unlock(&net->xdp.lock);
	cb->args[0] = num;
	return nlskb->len;
}

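/* Entry point for SOCK_DIAG_BY_FAMILY requests with sdiag_family == AF_XDP;
 * only NLM_F_DUMP requests are supported.
 */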
static int xsk_diag_handler_dump(struct sk_buff *nlskb, struct nlmsghdr *hdr)
{
	struct netlink_dump_control c = { .dump = xsk_diag_dump };
	int hdrlen = sizeof(struct xdp_diag_req);
	struct net *net = sock_net(nlskb->sk);

	if (nlmsg_len(hdr) < hdrlen)
		return -EINVAL;

	if (!(hdr->nlmsg_flags & NLM_F_DUMP))
		return -EOPNOTSUPP;

	return netlink_dump_start(net->diag_nlsk, nlskb, hdr, &c);
}

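/* Register with the sock_diag core so AF_XDP diag requests are routed here. */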
static const struct sock_diag_handler xsk_diag_handler = {
	.family = AF_XDP,
	.dump = xsk_diag_handler_dump,
};

static int __init xsk_diag_init(void)
{
	return sock_diag_register(&xsk_diag_handler);
}

static void __exit xsk_diag_exit(void)
{
	sock_diag_unregister(&xsk_diag_handler);
}

module_init(xsk_diag_init);
module_exit(xsk_diag_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_NETLINK, NETLINK_SOCK_DIAG, AF_XDP);