/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/filter.h>
#include <net/page_pool.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"

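/* Fill the next TX buffer descriptor with the given DMA address and
 * length, advance the TX producer index, and return the software
 * tx_buf entry so the caller can record per-packet state on it.
 */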
struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
				   struct bnxt_tx_ring_info *txr,
				   dma_addr_t mapping, u32 len)
{
	struct bnxt_sw_tx_bd *tx_buf;
	struct tx_bd *txbd;
	u32 flags;
	u16 prod;

	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
	flags = (len << TX_BD_LEN_SHIFT) | (1 << TX_BD_FLAGS_BD_CNT_SHIFT) |
		TX_BD_FLAGS_PACKET_END | bnxt_lhint_arr[len >> 9];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd->tx_bd_opaque = prod;
	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;
	return tx_buf;
}

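/* Queue a packet for XDP_TX.  The current RX producer index is saved
 * in the tx_buf so the RX doorbell can be rung once the TX completes.
 */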
static void __bnxt_xmit_xdp(struct bnxt *bp, struct bnxt_tx_ring_info *txr,
			    dma_addr_t mapping, u32 len, u16 rx_prod)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
	tx_buf->rx_prod = rx_prod;
	tx_buf->action = XDP_TX;
}

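/* Queue a redirected xdp_frame for transmission, saving the frame
 * pointer and DMA unmap info so they can be released on TX completion.
 */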
static void __bnxt_xmit_xdp_redirect(struct bnxt *bp,
				     struct bnxt_tx_ring_info *txr,
				     dma_addr_t mapping, u32 len,
				     struct xdp_frame *xdpf)
{
	struct bnxt_sw_tx_bd *tx_buf;

	tx_buf = bnxt_xmit_bd(bp, txr, mapping, len);
	tx_buf->action = XDP_REDIRECT;
	tx_buf->xdpf = xdpf;
	dma_unmap_addr_set(tx_buf, mapping, mapping);
	dma_unmap_len_set(tx_buf, len, 0);
}

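/* Handle TX completions on an XDP ring: unmap and free redirected
 * frames, and ring the RX doorbell for completed XDP_TX packets so
 * their RX buffers are returned to the hardware.
 */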
void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	bool rx_doorbell_needed = false;
	struct bnxt_sw_tx_bd *tx_buf;
	u16 tx_cons = txr->tx_cons;
	u16 last_tx_cons = tx_cons;
	int i;

	for (i = 0; i < nr_pkts; i++) {
		tx_buf = &txr->tx_buf_ring[tx_cons];

		if (tx_buf->action == XDP_REDIRECT) {
			struct pci_dev *pdev = bp->pdev;

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 dma_unmap_len(tx_buf, len),
					 PCI_DMA_TODEVICE);
			xdp_return_frame(tx_buf->xdpf);
			tx_buf->action = 0;
			tx_buf->xdpf = NULL;
		} else if (tx_buf->action == XDP_TX) {
			rx_doorbell_needed = true;
			last_tx_cons = tx_cons;
		}
		tx_cons = NEXT_TX(tx_cons);
	}
	txr->tx_cons = tx_cons;
	if (rx_doorbell_needed) {
		tx_buf = &txr->tx_buf_ring[last_tx_cons];
		bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod);
	}
}

/* returns the following:
 * true    - packet consumed by XDP and new buffer is allocated.
 * false   - packet should be passed to the stack.
 */
bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
		 struct page *page, u8 **data_ptr, unsigned int *len, u8 *event)
{
	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct pci_dev *pdev;
	struct xdp_buff xdp;
	dma_addr_t mapping;
	void *orig_data;
	u32 tx_avail;
	u32 offset;
	u32 act;

	if (!xdp_prog)
		return false;

	pdev = bp->pdev;
	rx_buf = &rxr->rx_buf_ring[cons];
	offset = bp->rx_offset;

	mapping = rx_buf->mapping - bp->rx_dma_offset;
	dma_sync_single_for_cpu(&pdev->dev, mapping + offset, *len, bp->rx_dir);

	txr = rxr->bnapi->tx_ring;
	xdp.data_hard_start = *data_ptr - offset;
	xdp.data = *data_ptr;
	xdp_set_data_meta_invalid(&xdp);
	xdp.data_end = *data_ptr + *len;
	xdp.rxq = &rxr->xdp_rxq;
	xdp.frame_sz = PAGE_SIZE; /* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
	orig_data = xdp.data;

	rcu_read_lock();
	act = bpf_prog_run_xdp(xdp_prog, &xdp);
	rcu_read_unlock();

	tx_avail = bnxt_tx_avail(bp, txr);
	/* If the tx ring is not full, we must not update the rx producer yet
	 * because we may still be transmitting on some BDs.
	 */
	if (tx_avail != bp->tx_ring_size)
		*event &= ~BNXT_RX_EVENT;

	*len = xdp.data_end - xdp.data;
	if (orig_data != xdp.data) {
		offset = xdp.data - xdp.data_hard_start;
		*data_ptr = xdp.data_hard_start + offset;
	}
	switch (act) {
	case XDP_PASS:
		return false;

	case XDP_TX:
		if (tx_avail < 1) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		*event = BNXT_TX_EVENT;
		dma_sync_single_for_device(&pdev->dev, mapping + offset, *len,
					   bp->rx_dir);
		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
				NEXT_RX(rxr->rx_prod));
		bnxt_reuse_rx_data(rxr, cons, page);
		return true;
	case XDP_REDIRECT:
		/* if we are calling this here then we know that the
		 * redirect is coming from a frame received by the
		 * bnxt_en driver.
		 */
		dma_unmap_page_attrs(&pdev->dev, mapping,
				     PAGE_SIZE, bp->rx_dir,
				     DMA_ATTR_WEAK_ORDERING);

		/* if we are unable to allocate a new buffer, abort and reuse */
		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			bnxt_reuse_rx_data(rxr, cons, page);
			return true;
		}

		if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
			trace_xdp_exception(bp->dev, xdp_prog, act);
			page_pool_recycle_direct(rxr->page_pool, page);
			return true;
		}

		*event |= BNXT_REDIRECT_EVENT;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(bp->dev, xdp_prog, act);
		fallthrough;
	case XDP_DROP:
		bnxt_reuse_rx_data(rxr, cons, page);
		break;
	}
	return true;
}

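/* ndo_xdp_xmit handler: transmit frames redirected here from other
 * interfaces.  Returns the number of frames sent; frames that cannot
 * be mapped or queued are dropped and returned to their owner.
 */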
int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
		  struct xdp_frame **frames, u32 flags)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog);
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	dma_addr_t mapping;
	int drops = 0;
	int ring;
	int i;

	if (!test_bit(BNXT_STATE_OPEN, &bp->state) ||
	    !bp->tx_nr_rings_xdp ||
	    !xdp_prog)
		return -EINVAL;

	ring = smp_processor_id() % bp->tx_nr_rings_xdp;
	txr = &bp->tx_ring[ring];

	for (i = 0; i < num_frames; i++) {
		struct xdp_frame *xdp = frames[i];

		if (!txr || !bnxt_tx_avail(bp, txr) ||
		    !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) {
			xdp_return_frame_rx_napi(xdp);
			drops++;
			continue;
		}

		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
					 DMA_TO_DEVICE);

		if (dma_mapping_error(&pdev->dev, mapping)) {
			xdp_return_frame_rx_napi(xdp);
			drops++;
			continue;
		}
		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
	}

	if (flags & XDP_XMIT_FLUSH) {
		/* Sync BD data before updating doorbell */
		wmb();
		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	}

	return num_frames - drops;
}

/* Under rtnl_lock */
static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
{
	struct net_device *dev = bp->dev;
	int tx_xdp = 0, rc, tc;
	struct bpf_prog *old;

	if (prog && bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) {
		netdev_warn(dev, "MTU %d larger than largest XDP supported MTU %d.\n",
			    bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU);
		return -EOPNOTSUPP;
	}
	if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) {
		netdev_warn(dev, "ethtool rx/tx channels must be combined to support XDP.\n");
		return -EOPNOTSUPP;
	}
	if (prog)
		tx_xdp = bp->rx_nr_rings;

	tc = netdev_get_num_tc(dev);
	if (!tc)
		tc = 1;
	rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
			      true, tc, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n");
		return rc;
	}
	if (netif_running(dev))
		bnxt_close_nic(bp, true, false);

	old = xchg(&bp->xdp_prog, prog);
	if (old)
		bpf_prog_put(old);

	if (prog) {
		bnxt_set_rx_skb_mode(bp, true);
	} else {
		int rx, tx;

		bnxt_set_rx_skb_mode(bp, false);
		bnxt_get_max_rings(bp, &rx, &tx, true);
		if (rx > 1) {
			bp->flags &= ~BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features |= NETIF_F_LRO;
		}
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp;
	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
	bnxt_set_tpa_flags(bp);
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, true, false);

	return 0;
}

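/* ndo_bpf handler: attach or detach an XDP program. */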
int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	switch (xdp->command) {
	case XDP_SETUP_PROG:
		rc = bnxt_xdp_set(bp, xdp->prog);
		break;
	default:
		rc = -EINVAL;
		break;
	}
	return rc;
}