// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2018 Intel Corporation. */

#include <linux/bpf_trace.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "ixgbe.h"
#include "ixgbe_txrx_common.h"
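
/* Return the AF_XDP buffer pool bound to @ring's queue id, or NULL when no
 * XDP program is loaded or zero-copy is not enabled on that queue.
 */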
struct xsk_buff_pool *ixgbe_xsk_pool(struct ixgbe_adapter *adapter,
				     struct ixgbe_ring *ring)
{
	bool xdp_on = READ_ONCE(adapter->xdp_prog);
	int qid = ring->ring_idx;

	if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps))
		return NULL;

	return xsk_get_pool_from_qid(adapter->netdev, qid);
}
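
/* Enable AF_XDP zero-copy on queue @qid: DMA-map the buffer pool, mark the
 * queue in af_xdp_zc_qps and, if the interface is running, restart the ring
 * pair and kick the NAPI context so Rx processing resumes.
 */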
static int ixgbe_xsk_pool_enable(struct ixgbe_adapter *adapter,
				 struct xsk_buff_pool *pool,
				 u16 qid)
{
	struct net_device *netdev = adapter->netdev;
	bool if_running;
	int err;

	if (qid >= adapter->num_rx_queues)
		return -EINVAL;

	if (qid >= netdev->real_num_rx_queues ||
	    qid >= netdev->real_num_tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR);
	if (err)
		return err;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	set_bit(qid, adapter->af_xdp_zc_qps);

	if (if_running) {
		ixgbe_txrx_ring_enable(adapter, qid);

		/* Kick start the NAPI context so that receiving will start */
		err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX);
		if (err) {
			clear_bit(qid, adapter->af_xdp_zc_qps);
			xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}
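
/* Tear down zero-copy on queue @qid: quiesce the ring pair if the interface
 * is running, clear the zero-copy bit and DMA-unmap the buffer pool.
 */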
static int ixgbe_xsk_pool_disable(struct ixgbe_adapter *adapter, u16 qid)
{
	struct xsk_buff_pool *pool;
	bool if_running;

	pool = xsk_get_pool_from_qid(adapter->netdev, qid);
	if (!pool)
		return -EINVAL;

	if_running = netif_running(adapter->netdev) &&
		     ixgbe_enabled_xdp_adapter(adapter);

	if (if_running)
		ixgbe_txrx_ring_disable(adapter, qid);

	clear_bit(qid, adapter->af_xdp_zc_qps);
	xsk_pool_dma_unmap(pool, IXGBE_RX_DMA_ATTR);

	if (if_running)
		ixgbe_txrx_ring_enable(adapter, qid);

	return 0;
}
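
/* Enable or disable an AF_XDP buffer pool on queue @qid; a NULL @pool means
 * disable.
 */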
int ixgbe_xsk_pool_setup(struct ixgbe_adapter *adapter,
			 struct xsk_buff_pool *pool,
			 u16 qid)
{
	return pool ? ixgbe_xsk_pool_enable(adapter, pool, qid) :
		ixgbe_xsk_pool_disable(adapter, qid);
}
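
/* Run the ring's XDP program on a zero-copy buffer. XDP_REDIRECT is the
 * expected fast path for AF_XDP; the return value tells the caller whether
 * the buffer was passed up the stack, queued for Tx/redirect, or consumed.
 */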
static int ixgbe_run_xdp_zc(struct ixgbe_adapter *adapter,
			    struct ixgbe_ring *rx_ring,
			    struct xdp_buff *xdp)
{
	int err, result = IXGBE_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct xdp_frame *xdpf;
	u32 act;

	rcu_read_lock();
	xdp_prog = READ_ONCE(rx_ring->xdp_prog);
	act = bpf_prog_run_xdp(xdp_prog, xdp);

	if (likely(act == XDP_REDIRECT)) {
		err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
		if (err)
			goto out_failure;
		rcu_read_unlock();
		return IXGBE_XDP_REDIR;
	}

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto out_failure;
		result = ixgbe_xmit_xdp_ring(adapter, xdpf);
		if (result == IXGBE_XDP_CONSUMED)
			goto out_failure;
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
		fallthrough;
	case XDP_ABORTED:
out_failure:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, act);
		fallthrough; /* handle aborts by dropping packet */
	case XDP_DROP:
		result = IXGBE_XDP_CONSUMED;
		break;
	}
	rcu_read_unlock();
	return result;
}
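
/* Refill up to @count Rx descriptors with buffers taken from the XSK pool.
 * Returns false if the pool ran out of buffers before @count descriptors
 * were filled; the tail register is bumped whenever new descriptors were
 * posted.
 */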
bool ixgbe_alloc_rx_buffers_zc(struct ixgbe_ring *rx_ring, u16 count)
{
	union ixgbe_adv_rx_desc *rx_desc;
	struct ixgbe_rx_buffer *bi;
	u16 i = rx_ring->next_to_use;
	dma_addr_t dma;
	bool ok = true;

	/* nothing to do */
	if (!count)
		return true;

	rx_desc = IXGBE_RX_DESC(rx_ring, i);
	bi = &rx_ring->rx_buffer_info[i];
	i -= rx_ring->count;

	do {
		bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool);
		if (!bi->xdp) {
			ok = false;
			break;
		}

		dma = xsk_buff_xdp_get_dma(bi->xdp);

		/* Refresh the desc even if buffer_addrs didn't change
		 * because each write-back erases this info.
		 */
		rx_desc->read.pkt_addr = cpu_to_le64(dma);

		rx_desc++;
		bi++;
		i++;
		if (unlikely(!i)) {
			rx_desc = IXGBE_RX_DESC(rx_ring, 0);
			bi = rx_ring->rx_buffer_info;
			i -= rx_ring->count;
		}

		/* clear the length for the next_to_use descriptor */
		rx_desc->wb.upper.length = 0;

		count--;
	} while (count);

	i += rx_ring->count;

	if (rx_ring->next_to_use != i) {
		rx_ring->next_to_use = i;

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch. (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		writel(i, rx_ring->tail);
	}

	return ok;
}
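
/* Build an skb for the XDP_PASS path by copying the zero-copy buffer's
 * payload (and any XDP metadata) into a freshly allocated skb, then return
 * the buffer to the XSK pool.
 */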
static struct sk_buff *ixgbe_construct_skb_zc(struct ixgbe_ring *rx_ring,
					      struct ixgbe_rx_buffer *bi)
{
	unsigned int metasize = bi->xdp->data - bi->xdp->data_meta;
	unsigned int datasize = bi->xdp->data_end - bi->xdp->data;
	struct sk_buff *skb;

	/* allocate a skb to store the frags */
	skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
			       bi->xdp->data_end - bi->xdp->data_hard_start,
			       GFP_ATOMIC | __GFP_NOWARN);
	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, bi->xdp->data - bi->xdp->data_hard_start);
	memcpy(__skb_put(skb, datasize), bi->xdp->data, datasize);
	if (metasize)
		skb_metadata_set(skb, metasize);

	xsk_buff_free(bi->xdp);
	bi->xdp = NULL;
	return skb;
}
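
/* Advance next_to_clean with wrap-around and prefetch the next descriptor. */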
static void ixgbe_inc_ntc(struct ixgbe_ring *rx_ring)
{
	u32 ntc = rx_ring->next_to_clean + 1;

	ntc = (ntc < rx_ring->count) ? ntc : 0;
	rx_ring->next_to_clean = ntc;
	prefetch(IXGBE_RX_DESC(rx_ring, ntc));
}
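
/* Zero-copy Rx poll loop. Buffers are refilled in batches, each completed
 * descriptor is run through the XDP program, and XDP_PASS frames are copied
 * into skbs for the regular stack. Returns the number of packets processed;
 * on buffer allocation failure the full budget may be returned so NAPI
 * keeps polling.
 */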
int ixgbe_clean_rx_irq_zc(struct ixgbe_q_vector *q_vector,
			  struct ixgbe_ring *rx_ring,
			  const int budget)
{
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct ixgbe_adapter *adapter = q_vector->adapter;
	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
	unsigned int xdp_res, xdp_xmit = 0;
	bool failure = false;
	struct sk_buff *skb;

	while (likely(total_rx_packets < budget)) {
		union ixgbe_adv_rx_desc *rx_desc;
		struct ixgbe_rx_buffer *bi;
		unsigned int size;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
			failure = failure ||
				  !ixgbe_alloc_rx_buffers_zc(rx_ring,
							     cleaned_count);
			cleaned_count = 0;
		}

		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean);
		size = le16_to_cpu(rx_desc->wb.upper.length);
		if (!size)
			break;

		/* This memory barrier is needed to keep us from reading
		 * any other fields out of the rx_desc until we know the
		 * descriptor has been written back
		 */
		dma_rmb();

		bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

		if (unlikely(!ixgbe_test_staterr(rx_desc,
						 IXGBE_RXD_STAT_EOP))) {
			struct ixgbe_rx_buffer *next_bi;

			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			ixgbe_inc_ntc(rx_ring);
			next_bi =
				&rx_ring->rx_buffer_info[rx_ring->next_to_clean];
			next_bi->discard = true;
			continue;
		}

		if (unlikely(bi->discard)) {
			xsk_buff_free(bi->xdp);
			bi->xdp = NULL;
			bi->discard = false;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		bi->xdp->data_end = bi->xdp->data + size;
		xsk_buff_dma_sync_for_cpu(bi->xdp, rx_ring->xsk_pool);
		xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp);

		if (xdp_res) {
			if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR))
				xdp_xmit |= xdp_res;
			else
				xsk_buff_free(bi->xdp);

			bi->xdp = NULL;
			total_rx_packets++;
			total_rx_bytes += size;

			cleaned_count++;
			ixgbe_inc_ntc(rx_ring);
			continue;
		}

		/* XDP_PASS path */
		skb = ixgbe_construct_skb_zc(rx_ring, bi);
		if (!skb) {
			rx_ring->rx_stats.alloc_rx_buff_failed++;
			break;
		}

		cleaned_count++;
		ixgbe_inc_ntc(rx_ring);

		if (eth_skb_pad(skb))
			continue;

		total_rx_bytes += skb->len;
		total_rx_packets++;

		ixgbe_process_skb_fields(rx_ring, rx_desc, skb);
		ixgbe_rx_skb(q_vector, skb);
	}

	if (xdp_xmit & IXGBE_XDP_REDIR)
		xdp_do_flush_map();

	if (xdp_xmit & IXGBE_XDP_TX) {
		struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.
		 */
		wmb();
		writel(ring->next_to_use, ring->tail);
	}

	u64_stats_update_begin(&rx_ring->syncp);
	rx_ring->stats.packets += total_rx_packets;
	rx_ring->stats.bytes += total_rx_bytes;
	u64_stats_update_end(&rx_ring->syncp);
	q_vector->rx.total_packets += total_rx_packets;
	q_vector->rx.total_bytes += total_rx_bytes;

	if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) {
		if (failure || rx_ring->next_to_clean == rx_ring->next_to_use)
			xsk_set_rx_need_wakeup(rx_ring->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_ring->xsk_pool);

		return (int)total_rx_packets;
	}
	return failure ? budget : (int)total_rx_packets;
}
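
/* Release any XSK buffers still attached to the Rx ring, e.g. when the ring
 * is being torn down.
 */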
void ixgbe_xsk_clean_rx_ring(struct ixgbe_ring *rx_ring)
{
	struct ixgbe_rx_buffer *bi;
	u16 i;

	for (i = 0; i < rx_ring->count; i++) {
		bi = &rx_ring->rx_buffer_info[i];

		if (!bi->xdp)
			continue;

		xsk_buff_free(bi->xdp);
		bi->xdp = NULL;
	}
}
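
/* Pull up to @budget descriptors from the XSK Tx queue and post them on the
 * XDP Tx ring. Returns true when all available work was completed within
 * the budget.
 */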
static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
{
	struct xsk_buff_pool *pool = xdp_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc = NULL;
	struct ixgbe_tx_buffer *tx_bi;
	bool work_done = true;
	struct xdp_desc desc;
	dma_addr_t dma;
	u32 cmd_type;

	while (budget-- > 0) {
		if (unlikely(!ixgbe_desc_unused(xdp_ring))) {
			work_done = false;
			break;
		}

		if (!netif_carrier_ok(xdp_ring->netdev))
			break;

		if (!xsk_tx_peek_desc(pool, &desc))
			break;

		dma = xsk_buff_raw_get_dma(pool, desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);

		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
		tx_bi->bytecount = desc.len;
		tx_bi->xdpf = NULL;
		tx_bi->gso_segs = 1;

		tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use);
		tx_desc->read.buffer_addr = cpu_to_le64(dma);

		/* put descriptor type bits */
		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
			   IXGBE_ADVTXD_DCMD_DEXT |
			   IXGBE_ADVTXD_DCMD_IFCS;
		cmd_type |= desc.len | IXGBE_TXD_CMD;
		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
		tx_desc->read.olinfo_status =
			cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);

		xdp_ring->next_to_use++;
		if (xdp_ring->next_to_use == xdp_ring->count)
			xdp_ring->next_to_use = 0;
	}

	if (tx_desc) {
		ixgbe_xdp_ring_update_tail(xdp_ring);
		xsk_tx_release(pool);
	}

	return !!budget && work_done;
}
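
/* Free a completed XDP_TX frame and drop its DMA mapping. */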
static void ixgbe_clean_xdp_tx_buffer(struct ixgbe_ring *tx_ring,
				      struct ixgbe_tx_buffer *tx_bi)
{
	xdp_return_frame(tx_bi->xdpf);
	dma_unmap_single(tx_ring->dev,
			 dma_unmap_addr(tx_bi, dma),
			 dma_unmap_len(tx_bi, len), DMA_TO_DEVICE);
	dma_unmap_len_set(tx_bi, len, 0);
}
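
/* Reclaim completed descriptors on the zero-copy XDP Tx ring, report the
 * number of finished XSK frames back to the pool, then try to transmit any
 * frames the socket has queued in the meantime.
 */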
bool ixgbe_clean_xdp_tx_irq(struct ixgbe_q_vector *q_vector,
			    struct ixgbe_ring *tx_ring, int napi_budget)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	unsigned int total_packets = 0, total_bytes = 0;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	union ixgbe_adv_tx_desc *tx_desc;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	tx_bi = &tx_ring->tx_buffer_info[ntc];
	tx_desc = IXGBE_TX_DESC(tx_ring, ntc);

	while (ntc != ntu) {
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			break;

		total_bytes += tx_bi->bytecount;
		total_packets += tx_bi->gso_segs;

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		tx_bi++;
		tx_desc++;
		ntc++;
		if (unlikely(ntc == tx_ring->count)) {
			ntc = 0;
			tx_bi = tx_ring->tx_buffer_info;
			tx_desc = IXGBE_TX_DESC(tx_ring, 0);
		}

		/* issue prefetch for next Tx descriptor */
		prefetch(tx_desc);
	}

	tx_ring->next_to_clean = ntc;

	u64_stats_update_begin(&tx_ring->syncp);
	tx_ring->stats.bytes += total_bytes;
	tx_ring->stats.packets += total_packets;
	u64_stats_update_end(&tx_ring->syncp);
	q_vector->tx.total_bytes += total_bytes;
	q_vector->tx.total_packets += total_packets;

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);

	if (xsk_uses_need_wakeup(pool))
		xsk_set_tx_need_wakeup(pool);

	return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit);
}
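
/* ndo_xsk_wakeup callback: validate the queue and, unless NAPI is already
 * scheduled, rearm the queue's interrupt so the zero-copy Tx/Rx paths run.
 */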
int ixgbe_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ring *ring;

	if (test_bit(__IXGBE_DOWN, &adapter->state))
		return -ENETDOWN;

	if (!READ_ONCE(adapter->xdp_prog))
		return -ENXIO;

	if (qid >= adapter->num_xdp_queues)
		return -ENXIO;

	ring = adapter->xdp_ring[qid];

	if (test_bit(__IXGBE_TX_DISABLED, &ring->state))
		return -ENETDOWN;

	if (!ring->xsk_pool)
		return -ENXIO;

	if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
		u64 eics = BIT_ULL(ring->q_vector->v_idx);

		ixgbe_irq_rearm_queues(adapter, eics);
	}

	return 0;
}
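
/* Drop all outstanding entries on the zero-copy XDP Tx ring at teardown and
 * tell the XSK pool how many of its frames were discarded.
 */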
void ixgbe_xsk_clean_tx_ring(struct ixgbe_ring *tx_ring)
{
	u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use;
	struct xsk_buff_pool *pool = tx_ring->xsk_pool;
	struct ixgbe_tx_buffer *tx_bi;
	u32 xsk_frames = 0;

	while (ntc != ntu) {
		tx_bi = &tx_ring->tx_buffer_info[ntc];

		if (tx_bi->xdpf)
			ixgbe_clean_xdp_tx_buffer(tx_ring, tx_bi);
		else
			xsk_frames++;

		tx_bi->xdpf = NULL;

		ntc++;
		if (ntc == tx_ring->count)
			ntc = 0;
	}

	if (xsk_frames)
		xsk_tx_completed(pool, xsk_frames);
}