xref: /OK3568_Linux_fs/kernel/drivers/net/hyperv/netvsc_bpf.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2019, Microsoft Corporation.
 *
 * Author:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/kernel.h>
#include <net/xdp.h>

#include <linux/mutex.h>
#include <linux/rtnetlink.h>

#include "hyperv_net.h"

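/* Run the attached XDP program, if any, on the packet held in the channel's
 * receive completion. Returns the XDP verdict; on XDP_PASS or XDP_TX the
 * copied packet page is left in xdp->data_hard_start for the caller.
 */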
u32 netvsc_run_xdp(struct net_device *ndev, struct netvsc_channel *nvchan,
		   struct xdp_buff *xdp)
{
	void *data = nvchan->rsc.data[0];
	u32 len = nvchan->rsc.len[0];
	struct page *page = NULL;
	struct bpf_prog *prog;
	u32 act = XDP_PASS;

	xdp->data_hard_start = NULL;

	rcu_read_lock();
	prog = rcu_dereference(nvchan->bpf_prog);

	if (!prog)
		goto out;

	/* allocate page buffer for data */
	page = alloc_page(GFP_ATOMIC);
	if (!page) {
		act = XDP_DROP;
		goto out;
	}

	xdp->data_hard_start = page_address(page);
	xdp->data = xdp->data_hard_start + NETVSC_XDP_HDRM;
	xdp_set_data_meta_invalid(xdp);
	xdp->data_end = xdp->data + len;
	xdp->rxq = &nvchan->xdp_rxq;
	xdp->frame_sz = PAGE_SIZE;

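	/* The packet data still lives in the receive buffer shared with the
	 * host and must not be modified in place, so copy it into the freshly
	 * allocated page (with NETVSC_XDP_HDRM bytes of headroom) before the
	 * program runs on it.
	 */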
	memcpy(xdp->data, data, len);

	act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_DROP:
		break;

	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		break;

	default:
		bpf_warn_invalid_xdp_action(act);
	}

out:
	rcu_read_unlock();

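	/* Keep the copied page only when the caller will consume it
	 * (XDP_PASS or XDP_TX); otherwise free it and clear the pointer.
	 */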
	if (page && act != XDP_PASS && act != XDP_TX) {
		__free_page(page);
		xdp->data_hard_start = NULL;
	}

	return act;
}

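/* Worst-case buffer space needed for a frame of @len bytes once the data and
 * the trailing skb_shared_info are aligned by SKB_DATA_ALIGN().
 */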
unsigned int netvsc_xdp_fraglen(unsigned int len)
{
	return SKB_DATA_ALIGN(len) +
	       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
}

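/* Return the currently installed XDP program. All channels share the same
 * program, so channel 0 is representative. Caller must hold the RTNL lock.
 */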
struct bpf_prog *netvsc_xdp_get(struct netvsc_device *nvdev)
{
	return rtnl_dereference(nvdev->chan_table[0].bpf_prog);
}

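/* Install @prog (or remove the current program when @prog is NULL) on every
 * channel of the synthetic device. Each channel holds its own reference to
 * the program.
 */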
int netvsc_xdp_set(struct net_device *dev, struct bpf_prog *prog,
		   struct netlink_ext_ack *extack,
		   struct netvsc_device *nvdev)
{
	struct bpf_prog *old_prog;
	int buf_max, i;

	old_prog = netvsc_xdp_get(nvdev);

	if (!old_prog && !prog)
		return 0;

	buf_max = NETVSC_XDP_HDRM + netvsc_xdp_fraglen(dev->mtu + ETH_HLEN);
	if (prog && buf_max > PAGE_SIZE) {
		netdev_err(dev, "XDP: mtu:%u too large, buf_max:%u\n",
			   dev->mtu, buf_max);
		NL_SET_ERR_MSG_MOD(extack, "XDP: mtu too large");

		return -EOPNOTSUPP;
	}

	if (prog && (dev->features & NETIF_F_LRO)) {
		netdev_err(dev, "XDP: LRO is not supported\n");
		NL_SET_ERR_MSG_MOD(extack, "XDP: LRO is not supported");

		return -EOPNOTSUPP;
	}

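	/* The caller's reference covers channel 0; take num_chn - 1 more so
	 * that every channel owns one reference.
	 */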
	if (prog)
		bpf_prog_add(prog, nvdev->num_chn - 1);

	for (i = 0; i < nvdev->num_chn; i++)
		rcu_assign_pointer(nvdev->chan_table[i].bpf_prog, prog);

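	/* Drop the per-channel references held on the previous program. */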
	if (old_prog)
		for (i = 0; i < nvdev->num_chn; i++)
			bpf_prog_put(old_prog);

	return 0;
}

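/* Mirror @prog onto the paired VF netdev, if one is present and its driver
 * implements ndo_bpf, so that traffic arriving through the VF sees the same
 * XDP program as the synthetic path.
 */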
int netvsc_vf_setxdp(struct net_device *vf_netdev, struct bpf_prog *prog)
{
	struct netdev_bpf xdp;
	bpf_op_t ndo_bpf;
	int ret;

	ASSERT_RTNL();

	if (!vf_netdev)
		return 0;

	ndo_bpf = vf_netdev->netdev_ops->ndo_bpf;
	if (!ndo_bpf)
		return 0;

	memset(&xdp, 0, sizeof(xdp));

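	/* Take an extra reference for the VF; it is dropped again below if
	 * the VF driver fails to install the program.
	 */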
	if (prog)
		bpf_prog_inc(prog);

	xdp.command = XDP_SETUP_PROG;
	xdp.prog = prog;

	ret = ndo_bpf(vf_netdev, &xdp);

	if (ret && prog)
		bpf_prog_put(prog);

	return ret;
}

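/* ndo_bpf handler for the synthetic device: install the program on the
 * netvsc channels first, then propagate it to the VF, rolling back the
 * synthetic side if the VF rejects it.
 */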
int netvsc_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct net_device_context *ndevctx = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netlink_ext_ack *extack = bpf->extack;
	int ret;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		ret = netvsc_xdp_set(dev, bpf->prog, extack, nvdev);

		if (ret)
			return ret;

		ret = netvsc_vf_setxdp(vf_netdev, bpf->prog);

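		/* VF attach failed: remove the program from the synthetic
		 * device again so the two datapaths stay in sync.
		 */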
		if (ret) {
			netdev_err(dev, "vf_setxdp failed:%d\n", ret);
			NL_SET_ERR_MSG_MOD(extack, "vf_setxdp failed");

			netvsc_xdp_set(dev, NULL, extack, nvdev);
		}

		return ret;

	default:
		return -EINVAL;
	}
}