// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bpf_trace.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>
#include "qede_ptp.h"

#include <linux/qed/qed_if.h>
#include "qede.h"
/*********************************
 * Content also used by slowpath *
 *********************************/

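/* Allocate a page-backed Rx buffer, map it for DMA and publish it on the
 * Rx BD ring. When @allow_lazy is set and enough buffers are already posted,
 * the allocation is deferred to the end of the NAPI run.
 */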
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy)
{
	struct sw_rx_data *sw_rx_data;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	struct page *data;

	/* In case lazy-allocation is allowed, postpone allocation until the
	 * end of the NAPI run. We'd still need to make sure the Rx ring has
	 * sufficient buffers to guarantee an additional Rx interrupt.
	 */
	if (allow_lazy && likely(rxq->filled_buffers > 12)) {
		rxq->filled_buffers--;
		return 0;
	}

	data = alloc_pages(GFP_ATOMIC, 0);
	if (unlikely(!data))
		return -ENOMEM;

	/* Map the entire page as it would be used
	 * for multiple RX buffer segment size mapping.
	 */
	mapping = dma_map_page(rxq->dev, data, 0,
			       PAGE_SIZE, rxq->data_direction);
	if (unlikely(dma_mapping_error(rxq->dev, mapping))) {
		__free_page(data);
		return -ENOMEM;
	}

	sw_rx_data = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	sw_rx_data->page_offset = 0;
	sw_rx_data->data = data;
	sw_rx_data->mapping = mapping;

	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)qed_chain_produce(&rxq->rx_bd_ring);
	WARN_ON(!rx_bd);
	rx_bd->addr.hi = cpu_to_le32(upper_32_bits(mapping));
	rx_bd->addr.lo = cpu_to_le32(lower_32_bits(mapping) +
				     rxq->rx_headroom);

	rxq->sw_rx_prod++;
	rxq->filled_buffers++;

	return 0;
}

/* Unmap the data and free skb */
int qede_free_tx_pkt(struct qede_dev *edev, struct qede_tx_queue *txq, int *len)
{
	u16 idx = txq->sw_tx_cons;
	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
	struct eth_tx_1st_bd *first_bd;
	struct eth_tx_bd *tx_data_bd;
	int bds_consumed = 0;
	int nbds;
	bool data_split = txq->sw_tx_ring.skbs[idx].flags & QEDE_TSO_SPLIT_BD;
	int i, split_bd_len = 0;

	if (unlikely(!skb)) {
		DP_ERR(edev,
		       "skb is null for txq idx=%d txq->sw_tx_cons=%d txq->sw_tx_prod=%d\n",
		       idx, txq->sw_tx_cons, txq->sw_tx_prod);
		return -1;
	}

	*len = skb->len;

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);

	bds_consumed++;

	nbds = first_bd->data.nbds;

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		bds_consumed++;
	}
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++, bds_consumed++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_consume(&txq->tx_pbl);
		dma_unmap_page(&edev->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	while (bds_consumed++ < nbds)
		qed_chain_consume(&txq->tx_pbl);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring.skbs[idx].skb = NULL;
	txq->sw_tx_ring.skbs[idx].flags = 0;

	return 0;
}

/* Unmap the data and free skb when mapping failed during start_xmit */
static void qede_free_failed_tx_pkt(struct qede_tx_queue *txq,
				    struct eth_tx_1st_bd *first_bd,
				    int nbd, bool data_split)
{
	u16 idx = txq->sw_tx_prod;
	struct sk_buff *skb = txq->sw_tx_ring.skbs[idx].skb;
	struct eth_tx_bd *tx_data_bd;
	int i, split_bd_len = 0;

	/* Return prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	first_bd = (struct eth_tx_1st_bd *)qed_chain_produce(&txq->tx_pbl);

	if (data_split) {
		struct eth_tx_bd *split = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		split_bd_len = BD_UNMAP_LEN(split);
		nbd--;
	}

	dma_unmap_single(txq->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd) + split_bd_len, DMA_TO_DEVICE);

	/* Unmap the data of the skb frags */
	for (i = 0; i < nbd; i++) {
		tx_data_bd = (struct eth_tx_bd *)
			qed_chain_produce(&txq->tx_pbl);
		if (tx_data_bd->nbytes)
			dma_unmap_page(txq->dev,
				       BD_UNMAP_ADDR(tx_data_bd),
				       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
	}

	/* Return again prod to its position before this skb was handled */
	qed_chain_set_prod(&txq->tx_pbl,
			   le16_to_cpu(txq->tx_db.data.bd_prod), first_bd);

	/* Free skb */
	dev_kfree_skb_any(skb);
	txq->sw_tx_ring.skbs[idx].skb = NULL;
	txq->sw_tx_ring.skbs[idx].flags = 0;
}

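/* Classify a packet about to be transmitted: checksum offload, LSO, tunnel
 * encapsulation and IPv6 extension headers (reported via @ipv6_ext).
 * Returns a bitmask of XMIT_* flags.
 */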
static u32 qede_xmit_type(struct sk_buff *skb, int *ipv6_ext)
{
	u32 rc = XMIT_L4_CSUM;
	__be16 l3_proto;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return XMIT_PLAIN;

	l3_proto = vlan_get_protocol(skb);
	if (l3_proto == htons(ETH_P_IPV6) &&
	    (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
		*ipv6_ext = 1;

	if (skb->encapsulation) {
		rc |= XMIT_ENC;
		if (skb_is_gso(skb)) {
			unsigned short gso_type = skb_shinfo(skb)->gso_type;

			if ((gso_type & SKB_GSO_UDP_TUNNEL_CSUM) ||
			    (gso_type & SKB_GSO_GRE_CSUM))
				rc |= XMIT_ENC_GSO_L4_CSUM;

			rc |= XMIT_LSO;
			return rc;
		}
	}

	if (skb_is_gso(skb))
		rc |= XMIT_LSO;

	return rc;
}

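/* Fill the 2nd (and optionally 3rd) Tx BD with the L4 header offset and
 * pseudo-checksum flags needed to offload packets that carry IPv6
 * extension headers.
 */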
static void qede_set_params_for_ipv6_ext(struct sk_buff *skb,
					 struct eth_tx_2nd_bd *second_bd,
					 struct eth_tx_3rd_bd *third_bd)
{
	u8 l4_proto;
	u16 bd2_bits1 = 0, bd2_bits2 = 0;

	bd2_bits1 |= (1 << ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT);

	bd2_bits2 |= ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) &
		     ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
		    << ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;

	bd2_bits1 |= (ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
		      ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT);

	if (vlan_get_protocol(skb) == htons(ETH_P_IPV6))
		l4_proto = ipv6_hdr(skb)->nexthdr;
	else
		l4_proto = ip_hdr(skb)->protocol;

	if (l4_proto == IPPROTO_UDP)
		bd2_bits1 |= 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;

	if (third_bd)
		third_bd->data.bitfields |=
			cpu_to_le16(((tcp_hdrlen(skb) / 4) &
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK) <<
				ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT);

	second_bd->data.bitfields1 = cpu_to_le16(bd2_bits1);
	second_bd->data.bitfields2 = cpu_to_le16(bd2_bits2);
}

static int map_frag_to_bd(struct qede_tx_queue *txq,
			  skb_frag_t *frag, struct eth_tx_bd *bd)
{
	dma_addr_t mapping;

	/* Map skb non-linear frag data for DMA */
	mapping = skb_frag_dma_map(txq->dev, frag, 0,
				   skb_frag_size(frag), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(txq->dev, mapping)))
		return -ENOMEM;

	/* Setup the data pointer of the frag data */
	BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_frag_size(frag));

	return 0;
}

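/* Return the total header length (up to and including the TCP header) of an
 * LSO packet, using the inner headers for encapsulated (tunneled) packets.
 */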
static u16 qede_get_skb_hlen(struct sk_buff *skb, bool is_encap_pkt)
{
	if (is_encap_pkt)
		return (skb_inner_transport_header(skb) +
			inner_tcp_hdrlen(skb) - skb->data);
	else
		return (skb_transport_header(skb) +
			tcp_hdrlen(skb) - skb->data);
}

/* +2 for 1st BD for headers and 2nd BD for headlen (if required) */
#if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
static bool qede_pkt_req_lin(struct sk_buff *skb, u8 xmit_type)
{
	int allowed_frags = ETH_TX_MAX_BDS_PER_NON_LSO_PACKET - 1;

	if (xmit_type & XMIT_LSO) {
		int hlen;

		hlen = qede_get_skb_hlen(skb, xmit_type & XMIT_ENC);

		/* linear payload would require its own BD */
		if (skb_headlen(skb) > hlen)
			allowed_frags--;
	}

	return (skb_shinfo(skb)->nr_frags > allowed_frags);
}
#endif

static inline void qede_update_tx_producer(struct qede_tx_queue *txq)
{
	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	barrier();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	/* Fence required to flush the write combined buffer, since another
	 * CPU may write to the same doorbell address and data may be lost
	 * due to relaxed order nature of write combined bar.
	 */
	wmb();
}

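/* Post a single XDP frame/page on an XDP Tx queue using one Tx BD. The
 * software ring entry keeps the DMA mapping and either the page (XDP_TX) or
 * the xdp_frame (ndo_xdp_xmit) for later completion handling.
 */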
static int qede_xdp_xmit(struct qede_tx_queue *txq, dma_addr_t dma, u16 pad,
			 u16 len, struct page *page, struct xdp_frame *xdpf)
{
	struct eth_tx_1st_bd *bd;
	struct sw_tx_xdp *xdp;
	u16 val;

	if (unlikely(qed_chain_get_elem_used(&txq->tx_pbl) >=
		     txq->num_tx_buffers)) {
		txq->stopped_cnt++;
		return -ENOMEM;
	}

	bd = qed_chain_produce(&txq->tx_pbl);
	bd->data.nbds = 1;
	bd->data.bd_flags.bitfields = BIT(ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT);

	val = (len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
	       ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;

	bd->data.bitfields = cpu_to_le16(val);

	/* We can safely ignore the offset, as it's 0 for XDP */
	BD_SET_UNMAP_ADDR_LEN(bd, dma + pad, len);

	xdp = txq->sw_tx_ring.xdp + txq->sw_tx_prod;
	xdp->mapping = dma;
	xdp->page = page;
	xdp->xdpf = xdpf;

	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;

	return 0;
}

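/* .ndo_xdp_xmit callback - map and queue a batch of XDP frames on the XDP Tx
 * queue selected by the current CPU, ringing the doorbell only when
 * XDP_XMIT_FLUSH is set. Returns the number of frames successfully queued.
 */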
int qede_xdp_transmit(struct net_device *dev, int n_frames,
		      struct xdp_frame **frames, u32 flags)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct device *dmadev = &edev->pdev->dev;
	struct qede_tx_queue *xdp_tx;
	struct xdp_frame *xdpf;
	dma_addr_t mapping;
	int i, drops = 0;
	u16 xdp_prod;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	if (unlikely(!netif_running(dev)))
		return -ENETDOWN;

	i = smp_processor_id() % edev->total_xdp_queues;
	xdp_tx = edev->fp_array[i].xdp_tx;

	spin_lock(&xdp_tx->xdp_tx_lock);

	for (i = 0; i < n_frames; i++) {
		xdpf = frames[i];

		mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
					 DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dmadev, mapping))) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;

			continue;
		}

		if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
					   NULL, xdpf))) {
			xdp_return_frame_rx_napi(xdpf);
			drops++;
		}
	}

	if (flags & XDP_XMIT_FLUSH) {
		xdp_prod = qed_chain_get_prod_idx(&xdp_tx->tx_pbl);

		xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
		qede_update_tx_producer(xdp_tx);
	}

	spin_unlock(&xdp_tx->xdp_tx_lock);

	return n_frames - drops;
}

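/* Return true if the hardware Tx consumer has advanced past the driver's
 * consumer, i.e. there are Tx completions waiting to be processed.
 */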
int qede_txq_has_work(struct qede_tx_queue *txq)
{
	u16 hw_bd_cons;

	/* Tell compiler that consumer and producer can change */
	barrier();
	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	if (qed_chain_get_cons_idx(&txq->tx_pbl) == hw_bd_cons + 1)
		return 0;

	return hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl);
}

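/* Reclaim completed XDP Tx buffers: unmap each entry and either return the
 * xdp_frame (ndo_xdp_xmit path) or free the page (XDP_TX path).
 */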
static void qede_xdp_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	struct sw_tx_xdp *xdp_info, *xdp_arr = txq->sw_tx_ring.xdp;
	struct device *dev = &edev->pdev->dev;
	struct xdp_frame *xdpf;
	u16 hw_bd_cons;

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		xdp_info = xdp_arr + txq->sw_tx_cons;
		xdpf = xdp_info->xdpf;

		if (xdpf) {
			dma_unmap_single(dev, xdp_info->mapping, xdpf->len,
					 DMA_TO_DEVICE);
			xdp_return_frame(xdpf);

			xdp_info->xdpf = NULL;
		} else {
			dma_unmap_page(dev, xdp_info->mapping, PAGE_SIZE,
				       DMA_BIDIRECTIONAL);
			__free_page(xdp_info->page);
		}

		qed_chain_consume(&txq->tx_pbl);
		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
		txq->xmit_pkts++;
	}
}

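/* Process Tx completions for a regular Tx queue: free completed skbs, update
 * BQL accounting and, if needed, wake a stopped netdev Tx queue.
 */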
static int qede_tx_int(struct qede_dev *edev, struct qede_tx_queue *txq)
{
	unsigned int pkts_compl = 0, bytes_compl = 0;
	struct netdev_queue *netdev_txq;
	u16 hw_bd_cons;
	int rc;

	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->ndev_txq_id);

	hw_bd_cons = le16_to_cpu(*txq->hw_cons_ptr);
	barrier();

	while (hw_bd_cons != qed_chain_get_cons_idx(&txq->tx_pbl)) {
		int len = 0;

		rc = qede_free_tx_pkt(edev, txq, &len);
		if (rc) {
			DP_NOTICE(edev, "hw_bd_cons = %d, chain_cons=%d\n",
				  hw_bd_cons,
				  qed_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}

		bytes_compl += len;
		pkts_compl++;
		txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
		txq->xmit_pkts++;
	}

	netdev_tx_completed_queue(netdev_txq, pkts_compl, bytes_compl);

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that
	 * start_xmit() will miss it and cause the queue to be stopped
	 * forever.
	 * On the other hand we need an rmb() here to ensure the proper
	 * ordering of bit testing in the following
	 * netif_tx_queue_stopped(txq) call.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(netdev_txq))) {
		/* Taking tx_lock is needed to prevent re-enabling the queue
		 * while it's empty. This could have happened if rx_action()
		 * gets suspended in qede_tx_int() after the condition before
		 * netif_tx_wake_queue(), while tx_action (qede_start_xmit()):
		 *
		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
		 * sends some packets consuming the whole queue again->
		 * stops the queue
		 */

		__netif_tx_lock(netdev_txq, smp_processor_id());

		if ((netif_tx_queue_stopped(netdev_txq)) &&
		    (edev->state == QEDE_STATE_OPEN) &&
		    (qed_chain_get_elem_left(&txq->tx_pbl)
		      >= (MAX_SKB_FRAGS + 1))) {
			netif_tx_wake_queue(netdev_txq);
			DP_VERBOSE(edev, NETIF_MSG_TX_DONE,
				   "Wake queue was called\n");
		}

		__netif_tx_unlock(netdev_txq);
	}

	return 0;
}

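/* Return true if the Rx completion ring holds entries the driver has not
 * consumed yet.
 */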
bool qede_has_rx_work(struct qede_rx_queue *rxq)
{
	u16 hw_comp_cons, sw_comp_cons;

	/* Tell compiler that status block fields can change */
	barrier();

	hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
	sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);

	return hw_comp_cons != sw_comp_cons;
}

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
	qed_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;
}

/* This function reuses the buffer (from an offset) from
 * consumer index to producer index in the bd ring
 */
static inline void qede_reuse_page(struct qede_rx_queue *rxq,
				   struct sw_rx_data *curr_cons)
{
	struct eth_rx_bd *rx_bd_prod = qed_chain_produce(&rxq->rx_bd_ring);
	struct sw_rx_data *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[rxq->sw_rx_prod & NUM_RX_BDS_MAX];
	*curr_prod = *curr_cons;

	new_mapping = curr_prod->mapping + curr_prod->page_offset;

	rx_bd_prod->addr.hi = cpu_to_le32(upper_32_bits(new_mapping));
	rx_bd_prod->addr.lo = cpu_to_le32(lower_32_bits(new_mapping) +
					  rxq->rx_headroom);

	rxq->sw_rx_prod++;
	curr_cons->data = NULL;
}

/* In case of allocation failures reuse buffers
 * from consumer index to produce buffers for firmware
 */
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count)
{
	struct sw_rx_data *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
		qede_reuse_page(rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);
	}
}

static inline int qede_realloc_rx_buffer(struct qede_rx_queue *rxq,
					 struct sw_rx_data *curr_cons)
{
	/* Move to the next segment in the page */
	curr_cons->page_offset += rxq->rx_buf_seg_size;

	if (curr_cons->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			/* Since we failed to allocate new buffer
			 * current buffer can be used again.
			 */
			curr_cons->page_offset -= rxq->rx_buf_seg_size;

			return -ENOMEM;
		}

		dma_unmap_page(rxq->dev, curr_cons->mapping,
			       PAGE_SIZE, rxq->data_direction);
	} else {
		/* Increment refcount of the page as we don't want
		 * network stack to take the ownership of the page
		 * which can be recycled multiple times by the driver.
		 */
		page_ref_inc(curr_cons->data);
		qede_reuse_page(rxq, curr_cons);
	}

	return 0;
}

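/* Publish the current Rx BD and CQE producer values to the device's internal
 * RAM so firmware can start using the newly posted buffers.
 */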
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	u16 bd_prod = qed_chain_get_prod_idx(&rxq->rx_bd_ring);
	u16 cqe_prod = qed_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = {0};

	/* Update producers */
	rx_prods.bd_prod = cpu_to_le16(bd_prod);
	rx_prods.cqe_prod = cpu_to_le16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(u32 *)&rx_prods);
}

static void qede_get_rxhash(struct sk_buff *skb, u8 bitfields, __le32 rss_hash)
{
	enum pkt_hash_types hash_type = PKT_HASH_TYPE_NONE;
	enum rss_hash_type htype;
	u32 hash = 0;

	htype = GET_FIELD(bitfields, ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
	if (htype) {
		hash_type = ((htype == RSS_HASH_TYPE_IPV4) ||
			     (htype == RSS_HASH_TYPE_IPV6)) ?
			    PKT_HASH_TYPE_L3 : PKT_HASH_TYPE_L4;
		hash = le32_to_cpu(rss_hash);
	}
	skb_set_hash(skb, hash, hash_type);
}

static void qede_set_skb_csum(struct sk_buff *skb, u8 csum_flag)
{
	skb_checksum_none_assert(skb);

	if (csum_flag & QEDE_CSUM_UNNECESSARY)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (csum_flag & QEDE_TUNN_CSUM_UNNECESSARY) {
		skb->csum_level = 1;
		skb->encapsulation = 1;
	}
}

static inline void qede_skb_receive(struct qede_dev *edev,
				    struct qede_fastpath *fp,
				    struct qede_rx_queue *rxq,
				    struct sk_buff *skb, u16 vlan_tag)
{
	if (vlan_tag)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);

	napi_gro_receive(&fp->napi, skb);
}

static void qede_set_gro_params(struct qede_dev *edev,
				struct sk_buff *skb,
				struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	u16 parsing_flags = le16_to_cpu(cqe->pars_flags.flags);

	if (((parsing_flags >> PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) &
	     PARSING_AND_ERR_FLAGS_L3TYPE_MASK) == 2)
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
	else
		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	skb_shinfo(skb)->gso_size = __le16_to_cpu(cqe->len_on_first_bd) -
				    cqe->header_len;
}

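/* Attach the current Rx buffer as a page fragment to the skb of an ongoing
 * TPA aggregation and post a replacement buffer. On failure the aggregation
 * is marked as errored and the buffer is recycled back to the BD ring.
 */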
static int qede_fill_frag_skb(struct qede_dev *edev,
			      struct qede_rx_queue *rxq,
			      u8 tpa_agg_index, u16 len_on_bd)
{
	struct sw_rx_data *current_bd = &rxq->sw_rx_ring[rxq->sw_rx_cons &
							 NUM_RX_BDS_MAX];
	struct qede_agg_info *tpa_info = &rxq->tpa_info[tpa_agg_index];
	struct sk_buff *skb = tpa_info->skb;

	if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
		goto out;

	/* Add one frag and update the appropriate fields in the skb */
	skb_fill_page_desc(skb, tpa_info->frag_id++,
			   current_bd->data,
			   current_bd->page_offset + rxq->rx_headroom,
			   len_on_bd);

	if (unlikely(qede_realloc_rx_buffer(rxq, current_bd))) {
		/* Incr page ref count to reuse on allocation failure
		 * so that it doesn't get freed while freeing SKB.
		 */
		page_ref_inc(current_bd->data);
		goto out;
	}

	qede_rx_bd_ring_consume(rxq);

	skb->data_len += len_on_bd;
	skb->truesize += rxq->rx_buf_seg_size;
	skb->len += len_on_bd;

	return 0;

out:
	tpa_info->state = QEDE_AGG_STATE_ERROR;
	qede_recycle_rx_bd_ring(rxq, 1);

	return -ENOMEM;
}

static bool qede_tunn_exist(u16 flag)
{
	return !!(flag & (PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
			  PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT));
}

static u8 qede_check_tunn_csum(u16 flag)
{
	u16 csum_flag = 0;
	u8 tcsum = 0;

	if (flag & (PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT))
		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

	if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
			     PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
		     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return QEDE_CSUM_UNNECESSARY | tcsum;
}

static inline struct sk_buff *
qede_build_skb(struct qede_rx_queue *rxq,
	       struct sw_rx_data *bd, u16 len, u16 pad)
{
	struct sk_buff *skb;
	void *buf;

	buf = page_address(bd->data) + bd->page_offset;
	skb = build_skb(buf, rxq->rx_buf_seg_size);

	if (unlikely(!skb))
		return NULL;

	skb_reserve(skb, pad);
	skb_put(skb, len);

	return skb;
}

static struct sk_buff *
qede_tpa_rx_build_skb(struct qede_dev *edev,
		      struct qede_rx_queue *rxq,
		      struct sw_rx_data *bd, u16 len, u16 pad,
		      bool alloc_skb)
{
	struct sk_buff *skb;

	skb = qede_build_skb(rxq, bd, len, pad);
	bd->page_offset += rxq->rx_buf_seg_size;

	if (bd->page_offset == PAGE_SIZE) {
		if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
			DP_NOTICE(edev,
				  "Failed to allocate RX buffer for tpa start\n");
			bd->page_offset -= rxq->rx_buf_seg_size;
			page_ref_inc(bd->data);
			dev_kfree_skb_any(skb);
			return NULL;
		}
	} else {
		page_ref_inc(bd->data);
		qede_reuse_page(rxq, bd);
	}

	/* We've consumed the first BD and prepared an SKB */
	qede_rx_bd_ring_consume(rxq);

	return skb;
}

static struct sk_buff *
qede_rx_build_skb(struct qede_dev *edev,
		  struct qede_rx_queue *rxq,
		  struct sw_rx_data *bd, u16 len, u16 pad)
{
	struct sk_buff *skb = NULL;

	/* For smaller frames still need to allocate skb, memcpy
	 * data and benefit in reusing the page segment instead of
	 * un-mapping it.
	 */
	if ((len + pad <= edev->rx_copybreak)) {
		unsigned int offset = bd->page_offset + pad;

		skb = netdev_alloc_skb(edev->ndev, QEDE_RX_HDR_SIZE);
		if (unlikely(!skb))
			return NULL;

		skb_reserve(skb, pad);
		skb_put_data(skb, page_address(bd->data) + offset, len);
		qede_reuse_page(rxq, bd);
		goto out;
	}

	skb = qede_build_skb(rxq, bd, len, pad);

	if (unlikely(qede_realloc_rx_buffer(rxq, bd))) {
		/* Incr page ref count to reuse on allocation failure so
		 * that it doesn't get freed while freeing SKB [as it's
		 * already mapped there].
		 */
		page_ref_inc(bd->data);
		dev_kfree_skb_any(skb);
		return NULL;
	}
out:
	/* We've consumed the first BD and prepared an SKB */
	qede_rx_bd_ring_consume(rxq);

	return skb;
}

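/* Handle a TPA-start completion: build the skb that will aggregate the flow,
 * record VLAN/RSS metadata and GRO parameters, and consume any additional
 * length already reported in the start CQE.
 */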
static void qede_tpa_start(struct qede_dev *edev,
			   struct qede_rx_queue *rxq,
			   struct eth_fast_path_rx_tpa_start_cqe *cqe)
{
	struct qede_agg_info *tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
	struct sw_rx_data *sw_rx_data_cons;
	u16 pad;

	sw_rx_data_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS_MAX];
	pad = cqe->placement_offset + rxq->rx_headroom;

	tpa_info->skb = qede_tpa_rx_build_skb(edev, rxq, sw_rx_data_cons,
					      le16_to_cpu(cqe->len_on_first_bd),
					      pad, false);
	tpa_info->buffer.page_offset = sw_rx_data_cons->page_offset;
	tpa_info->buffer.mapping = sw_rx_data_cons->mapping;

	if (unlikely(!tpa_info->skb)) {
		DP_NOTICE(edev, "Failed to allocate SKB for gro\n");

		/* Consume from ring but do not produce since
		 * this might be used by FW still, it will be re-used
		 * at TPA end.
		 */
		tpa_info->tpa_start_fail = true;
		qede_rx_bd_ring_consume(rxq);
		tpa_info->state = QEDE_AGG_STATE_ERROR;
		goto cons_buf;
	}

	tpa_info->frag_id = 0;
	tpa_info->state = QEDE_AGG_STATE_START;

	if ((le16_to_cpu(cqe->pars_flags.flags) >>
	     PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT) &
	    PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK)
		tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
	else
		tpa_info->vlan_tag = 0;

	qede_get_rxhash(tpa_info->skb, cqe->bitfields, cqe->rss_hash);

	/* This is needed in order to enable forwarding support */
	qede_set_gro_params(edev, tpa_info->skb, cqe);

cons_buf: /* We still need to handle bd_len_list to consume buffers */
	if (likely(cqe->bw_ext_bd_len_list[0]))
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->bw_ext_bd_len_list[0]));

	if (unlikely(cqe->bw_ext_bd_len_list[1])) {
		DP_ERR(edev,
		       "Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
		tpa_info->state = QEDE_AGG_STATE_ERROR;
	}
}

#ifdef CONFIG_INET
static void qede_gro_ip_csum(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct iphdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);

	tcp_gro_complete(skb);
}

static void qede_gro_ipv6_csum(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th;

	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
	th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
				  &iph->saddr, &iph->daddr, 0);
	tcp_gro_complete(skb);
}
#endif

static void qede_gro_receive(struct qede_dev *edev,
			     struct qede_fastpath *fp,
			     struct sk_buff *skb,
			     u16 vlan_tag)
{
	/* FW can send a single MTU sized packet from gro flow
	 * due to aggregation timeout/last segment etc. which
	 * is not expected to be a gro packet. If a skb has zero
	 * frags then simply push it in the stack as non gso skb.
	 */
	if (unlikely(!skb->data_len)) {
		skb_shinfo(skb)->gso_type = 0;
		skb_shinfo(skb)->gso_size = 0;
		goto send_skb;
	}

#ifdef CONFIG_INET
	if (skb_shinfo(skb)->gso_size) {
		skb_reset_network_header(skb);

		switch (skb->protocol) {
		case htons(ETH_P_IP):
			qede_gro_ip_csum(skb);
			break;
		case htons(ETH_P_IPV6):
			qede_gro_ipv6_csum(skb);
			break;
		default:
			DP_ERR(edev,
			       "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
			       ntohs(skb->protocol));
		}
	}
#endif

send_skb:
	skb_record_rx_queue(skb, fp->rxq->rxq_id);
	qede_skb_receive(edev, fp, fp->rxq, skb, vlan_tag);
}

static inline void qede_tpa_cont(struct qede_dev *edev,
				 struct qede_rx_queue *rxq,
				 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
{
	int i;

	for (i = 0; cqe->len_list[i]; i++)
		qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
				   le16_to_cpu(cqe->len_list[i]));

	if (unlikely(i > 1))
		DP_ERR(edev,
		       "Strange - TPA cont with more than a single len_list entry\n");
}

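/* Handle a TPA-end completion: attach any remaining fragments, validate the
 * aggregation against the CQE, finalize GRO metadata and hand the skb to the
 * stack. Returns 1 on success, 0 if the aggregation had to be dropped.
 */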
qede_tpa_end(struct qede_dev * edev,struct qede_fastpath * fp,struct eth_fast_path_rx_tpa_end_cqe * cqe)978*4882a593Smuzhiyun static int qede_tpa_end(struct qede_dev *edev,
979*4882a593Smuzhiyun struct qede_fastpath *fp,
980*4882a593Smuzhiyun struct eth_fast_path_rx_tpa_end_cqe *cqe)
981*4882a593Smuzhiyun {
982*4882a593Smuzhiyun struct qede_rx_queue *rxq = fp->rxq;
983*4882a593Smuzhiyun struct qede_agg_info *tpa_info;
984*4882a593Smuzhiyun struct sk_buff *skb;
985*4882a593Smuzhiyun int i;
986*4882a593Smuzhiyun
987*4882a593Smuzhiyun tpa_info = &rxq->tpa_info[cqe->tpa_agg_index];
988*4882a593Smuzhiyun skb = tpa_info->skb;
989*4882a593Smuzhiyun
990*4882a593Smuzhiyun if (tpa_info->buffer.page_offset == PAGE_SIZE)
991*4882a593Smuzhiyun dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
992*4882a593Smuzhiyun PAGE_SIZE, rxq->data_direction);
993*4882a593Smuzhiyun
994*4882a593Smuzhiyun for (i = 0; cqe->len_list[i]; i++)
995*4882a593Smuzhiyun qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
996*4882a593Smuzhiyun le16_to_cpu(cqe->len_list[i]));
997*4882a593Smuzhiyun if (unlikely(i > 1))
998*4882a593Smuzhiyun DP_ERR(edev,
999*4882a593Smuzhiyun "Strange - TPA emd with more than a single len_list entry\n");
1000*4882a593Smuzhiyun
1001*4882a593Smuzhiyun if (unlikely(tpa_info->state != QEDE_AGG_STATE_START))
1002*4882a593Smuzhiyun goto err;
1003*4882a593Smuzhiyun
1004*4882a593Smuzhiyun /* Sanity */
1005*4882a593Smuzhiyun if (unlikely(cqe->num_of_bds != tpa_info->frag_id + 1))
1006*4882a593Smuzhiyun DP_ERR(edev,
1007*4882a593Smuzhiyun "Strange - TPA had %02x BDs, but SKB has only %d frags\n",
1008*4882a593Smuzhiyun cqe->num_of_bds, tpa_info->frag_id);
1009*4882a593Smuzhiyun if (unlikely(skb->len != le16_to_cpu(cqe->total_packet_len)))
1010*4882a593Smuzhiyun DP_ERR(edev,
1011*4882a593Smuzhiyun "Strange - total packet len [cqe] is %4x but SKB has len %04x\n",
1012*4882a593Smuzhiyun le16_to_cpu(cqe->total_packet_len), skb->len);
1013*4882a593Smuzhiyun
1014*4882a593Smuzhiyun /* Finalize the SKB */
1015*4882a593Smuzhiyun skb->protocol = eth_type_trans(skb, edev->ndev);
1016*4882a593Smuzhiyun skb->ip_summed = CHECKSUM_UNNECESSARY;
1017*4882a593Smuzhiyun
1018*4882a593Smuzhiyun /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
1019*4882a593Smuzhiyun * to skb_shinfo(skb)->gso_segs
1020*4882a593Smuzhiyun */
1021*4882a593Smuzhiyun NAPI_GRO_CB(skb)->count = le16_to_cpu(cqe->num_of_coalesced_segs);
1022*4882a593Smuzhiyun
1023*4882a593Smuzhiyun qede_gro_receive(edev, fp, skb, tpa_info->vlan_tag);
1024*4882a593Smuzhiyun
1025*4882a593Smuzhiyun tpa_info->state = QEDE_AGG_STATE_NONE;
1026*4882a593Smuzhiyun
1027*4882a593Smuzhiyun return 1;
1028*4882a593Smuzhiyun err:
1029*4882a593Smuzhiyun tpa_info->state = QEDE_AGG_STATE_NONE;
1030*4882a593Smuzhiyun
1031*4882a593Smuzhiyun if (tpa_info->tpa_start_fail) {
1032*4882a593Smuzhiyun qede_reuse_page(rxq, &tpa_info->buffer);
1033*4882a593Smuzhiyun tpa_info->tpa_start_fail = false;
1034*4882a593Smuzhiyun }
1035*4882a593Smuzhiyun
1036*4882a593Smuzhiyun dev_kfree_skb_any(tpa_info->skb);
1037*4882a593Smuzhiyun tpa_info->skb = NULL;
1038*4882a593Smuzhiyun return 0;
1039*4882a593Smuzhiyun }
1040*4882a593Smuzhiyun
qede_check_notunn_csum(u16 flag)1041*4882a593Smuzhiyun static u8 qede_check_notunn_csum(u16 flag)
1042*4882a593Smuzhiyun {
1043*4882a593Smuzhiyun u16 csum_flag = 0;
1044*4882a593Smuzhiyun u8 csum = 0;
1045*4882a593Smuzhiyun
1046*4882a593Smuzhiyun if (flag & (PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
1047*4882a593Smuzhiyun PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT)) {
1048*4882a593Smuzhiyun csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
1049*4882a593Smuzhiyun PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
1050*4882a593Smuzhiyun csum = QEDE_CSUM_UNNECESSARY;
1051*4882a593Smuzhiyun }
1052*4882a593Smuzhiyun
1053*4882a593Smuzhiyun csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1054*4882a593Smuzhiyun PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;
1055*4882a593Smuzhiyun
1056*4882a593Smuzhiyun if (csum_flag & flag)
1057*4882a593Smuzhiyun return QEDE_CSUM_ERROR;
1058*4882a593Smuzhiyun
1059*4882a593Smuzhiyun return csum;
1060*4882a593Smuzhiyun }
1061*4882a593Smuzhiyun
qede_check_csum(u16 flag)1062*4882a593Smuzhiyun static u8 qede_check_csum(u16 flag)
1063*4882a593Smuzhiyun {
1064*4882a593Smuzhiyun if (!qede_tunn_exist(flag))
1065*4882a593Smuzhiyun return qede_check_notunn_csum(flag);
1066*4882a593Smuzhiyun else
1067*4882a593Smuzhiyun return qede_check_tunn_csum(flag);
1068*4882a593Smuzhiyun }
1069*4882a593Smuzhiyun
qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe * cqe,u16 flag)1070*4882a593Smuzhiyun static bool qede_pkt_is_ip_fragmented(struct eth_fast_path_rx_reg_cqe *cqe,
1071*4882a593Smuzhiyun u16 flag)
1072*4882a593Smuzhiyun {
1073*4882a593Smuzhiyun u8 tun_pars_flg = cqe->tunnel_pars_flags.flags;
1074*4882a593Smuzhiyun
1075*4882a593Smuzhiyun if ((tun_pars_flg & (ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK <<
1076*4882a593Smuzhiyun ETH_TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT)) ||
1077*4882a593Smuzhiyun (flag & (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1078*4882a593Smuzhiyun PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT)))
1079*4882a593Smuzhiyun return true;
1080*4882a593Smuzhiyun
1081*4882a593Smuzhiyun return false;
1082*4882a593Smuzhiyun }
1083*4882a593Smuzhiyun
1084*4882a593Smuzhiyun /* Return true iff packet is to be passed to stack */
qede_rx_xdp(struct qede_dev * edev,struct qede_fastpath * fp,struct qede_rx_queue * rxq,struct bpf_prog * prog,struct sw_rx_data * bd,struct eth_fast_path_rx_reg_cqe * cqe,u16 * data_offset,u16 * len)1085*4882a593Smuzhiyun static bool qede_rx_xdp(struct qede_dev *edev,
1086*4882a593Smuzhiyun struct qede_fastpath *fp,
1087*4882a593Smuzhiyun struct qede_rx_queue *rxq,
1088*4882a593Smuzhiyun struct bpf_prog *prog,
1089*4882a593Smuzhiyun struct sw_rx_data *bd,
1090*4882a593Smuzhiyun struct eth_fast_path_rx_reg_cqe *cqe,
1091*4882a593Smuzhiyun u16 *data_offset, u16 *len)
1092*4882a593Smuzhiyun {
1093*4882a593Smuzhiyun struct xdp_buff xdp;
1094*4882a593Smuzhiyun enum xdp_action act;
1095*4882a593Smuzhiyun
1096*4882a593Smuzhiyun xdp.data_hard_start = page_address(bd->data);
1097*4882a593Smuzhiyun xdp.data = xdp.data_hard_start + *data_offset;
1098*4882a593Smuzhiyun xdp_set_data_meta_invalid(&xdp);
1099*4882a593Smuzhiyun xdp.data_end = xdp.data + *len;
1100*4882a593Smuzhiyun xdp.rxq = &rxq->xdp_rxq;
1101*4882a593Smuzhiyun xdp.frame_sz = rxq->rx_buf_seg_size; /* PAGE_SIZE when XDP enabled */
1102*4882a593Smuzhiyun
1103*4882a593Smuzhiyun /* Queues always have a full reset currently, so for the time
1104*4882a593Smuzhiyun * being until there's atomic program replace just mark read
1105*4882a593Smuzhiyun * side for map helpers.
1106*4882a593Smuzhiyun */
1107*4882a593Smuzhiyun rcu_read_lock();
1108*4882a593Smuzhiyun act = bpf_prog_run_xdp(prog, &xdp);
1109*4882a593Smuzhiyun rcu_read_unlock();
1110*4882a593Smuzhiyun
1111*4882a593Smuzhiyun /* Recalculate, as XDP might have changed the headers */
1112*4882a593Smuzhiyun *data_offset = xdp.data - xdp.data_hard_start;
1113*4882a593Smuzhiyun *len = xdp.data_end - xdp.data;
1114*4882a593Smuzhiyun
1115*4882a593Smuzhiyun if (act == XDP_PASS)
1116*4882a593Smuzhiyun return true;
1117*4882a593Smuzhiyun
1118*4882a593Smuzhiyun /* Count number of packets not to be passed to stack */
1119*4882a593Smuzhiyun rxq->xdp_no_pass++;
1120*4882a593Smuzhiyun
1121*4882a593Smuzhiyun switch (act) {
1122*4882a593Smuzhiyun case XDP_TX:
1123*4882a593Smuzhiyun /* We need the replacement buffer before transmit. */
1124*4882a593Smuzhiyun if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
1125*4882a593Smuzhiyun qede_recycle_rx_bd_ring(rxq, 1);
1126*4882a593Smuzhiyun
1127*4882a593Smuzhiyun trace_xdp_exception(edev->ndev, prog, act);
1128*4882a593Smuzhiyun break;
1129*4882a593Smuzhiyun }
1130*4882a593Smuzhiyun
1131*4882a593Smuzhiyun /* Now if there's a transmission problem, we'd still have to
1132*4882a593Smuzhiyun * throw current buffer, as replacement was already allocated.
1133*4882a593Smuzhiyun */
1134*4882a593Smuzhiyun if (unlikely(qede_xdp_xmit(fp->xdp_tx, bd->mapping,
1135*4882a593Smuzhiyun *data_offset, *len, bd->data,
1136*4882a593Smuzhiyun NULL))) {
1137*4882a593Smuzhiyun dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
1138*4882a593Smuzhiyun rxq->data_direction);
1139*4882a593Smuzhiyun __free_page(bd->data);
1140*4882a593Smuzhiyun
1141*4882a593Smuzhiyun trace_xdp_exception(edev->ndev, prog, act);
1142*4882a593Smuzhiyun } else {
1143*4882a593Smuzhiyun dma_sync_single_for_device(rxq->dev,
1144*4882a593Smuzhiyun bd->mapping + *data_offset,
1145*4882a593Smuzhiyun *len, rxq->data_direction);
1146*4882a593Smuzhiyun fp->xdp_xmit |= QEDE_XDP_TX;
1147*4882a593Smuzhiyun }
1148*4882a593Smuzhiyun
1149*4882a593Smuzhiyun /* Regardless, we've consumed an Rx BD */
1150*4882a593Smuzhiyun qede_rx_bd_ring_consume(rxq);
1151*4882a593Smuzhiyun break;
1152*4882a593Smuzhiyun case XDP_REDIRECT:
1153*4882a593Smuzhiyun /* We need the replacement buffer before redirecting. */
1154*4882a593Smuzhiyun if (unlikely(qede_alloc_rx_buffer(rxq, true))) {
1155*4882a593Smuzhiyun qede_recycle_rx_bd_ring(rxq, 1);
1156*4882a593Smuzhiyun
1157*4882a593Smuzhiyun trace_xdp_exception(edev->ndev, prog, act);
1158*4882a593Smuzhiyun break;
1159*4882a593Smuzhiyun }
1160*4882a593Smuzhiyun
1161*4882a593Smuzhiyun dma_unmap_page(rxq->dev, bd->mapping, PAGE_SIZE,
1162*4882a593Smuzhiyun rxq->data_direction);
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun if (unlikely(xdp_do_redirect(edev->ndev, &xdp, prog)))
1165*4882a593Smuzhiyun DP_NOTICE(edev, "Failed to redirect the packet\n");
1166*4882a593Smuzhiyun else
1167*4882a593Smuzhiyun fp->xdp_xmit |= QEDE_XDP_REDIRECT;
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun qede_rx_bd_ring_consume(rxq);
1170*4882a593Smuzhiyun break;
1171*4882a593Smuzhiyun default:
1172*4882a593Smuzhiyun bpf_warn_invalid_xdp_action(act);
1173*4882a593Smuzhiyun fallthrough;
1174*4882a593Smuzhiyun case XDP_ABORTED:
1175*4882a593Smuzhiyun trace_xdp_exception(edev->ndev, prog, act);
1176*4882a593Smuzhiyun fallthrough;
1177*4882a593Smuzhiyun case XDP_DROP:
1178*4882a593Smuzhiyun qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
1179*4882a593Smuzhiyun }
1180*4882a593Smuzhiyun
1181*4882a593Smuzhiyun return false;
1182*4882a593Smuzhiyun }
1183*4882a593Smuzhiyun
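/* Attach the remaining BDs of a multi-BD (jumbo) packet as page fragments to
 * the SKB that already holds the data of the first BD. Returns the number of
 * BDs that could not be mapped; 0 on success.
 */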
1184*4882a593Smuzhiyun static int qede_rx_build_jumbo(struct qede_dev *edev,
1185*4882a593Smuzhiyun struct qede_rx_queue *rxq,
1186*4882a593Smuzhiyun struct sk_buff *skb,
1187*4882a593Smuzhiyun struct eth_fast_path_rx_reg_cqe *cqe,
1188*4882a593Smuzhiyun u16 first_bd_len)
1189*4882a593Smuzhiyun {
1190*4882a593Smuzhiyun u16 pkt_len = le16_to_cpu(cqe->pkt_len);
1191*4882a593Smuzhiyun struct sw_rx_data *bd;
1192*4882a593Smuzhiyun u16 bd_cons_idx;
1193*4882a593Smuzhiyun u8 num_frags;
1194*4882a593Smuzhiyun
1195*4882a593Smuzhiyun pkt_len -= first_bd_len;
1196*4882a593Smuzhiyun
1197*4882a593Smuzhiyun /* We've already used one BD for the SKB. Now take care of the rest */
1198*4882a593Smuzhiyun for (num_frags = cqe->bd_num - 1; num_frags > 0; num_frags--) {
1199*4882a593Smuzhiyun u16 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1200*4882a593Smuzhiyun pkt_len;
1201*4882a593Smuzhiyun
1202*4882a593Smuzhiyun if (unlikely(!cur_size)) {
1203*4882a593Smuzhiyun DP_ERR(edev,
1204*4882a593Smuzhiyun "Still got %d BDs for mapping jumbo, but length became 0\n",
1205*4882a593Smuzhiyun num_frags);
1206*4882a593Smuzhiyun goto out;
1207*4882a593Smuzhiyun }
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun /* We need a replacement buffer for each BD */
1210*4882a593Smuzhiyun if (unlikely(qede_alloc_rx_buffer(rxq, true)))
1211*4882a593Smuzhiyun goto out;
1212*4882a593Smuzhiyun
1213*4882a593Smuzhiyun /* Now that we've allocated the replacement buffer,
1214*4882a593Smuzhiyun * we can safely consume the next BD and map it to the SKB.
1215*4882a593Smuzhiyun */
1216*4882a593Smuzhiyun bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1217*4882a593Smuzhiyun bd = &rxq->sw_rx_ring[bd_cons_idx];
1218*4882a593Smuzhiyun qede_rx_bd_ring_consume(rxq);
1219*4882a593Smuzhiyun
1220*4882a593Smuzhiyun dma_unmap_page(rxq->dev, bd->mapping,
1221*4882a593Smuzhiyun PAGE_SIZE, DMA_FROM_DEVICE);
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags++,
1224*4882a593Smuzhiyun bd->data, rxq->rx_headroom, cur_size);
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun skb->truesize += PAGE_SIZE;
1227*4882a593Smuzhiyun skb->data_len += cur_size;
1228*4882a593Smuzhiyun skb->len += cur_size;
1229*4882a593Smuzhiyun pkt_len -= cur_size;
1230*4882a593Smuzhiyun }
1231*4882a593Smuzhiyun
1232*4882a593Smuzhiyun if (unlikely(pkt_len))
1233*4882a593Smuzhiyun DP_ERR(edev,
1234*4882a593Smuzhiyun "Mapped all BDs of jumbo, but still have %d bytes\n",
1235*4882a593Smuzhiyun pkt_len);
1236*4882a593Smuzhiyun
1237*4882a593Smuzhiyun out:
1238*4882a593Smuzhiyun return num_frags;
1239*4882a593Smuzhiyun }
1240*4882a593Smuzhiyun
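/* Dispatch a TPA (aggregation) completion to the matching start/continue/end
 * handler; only a TPA_END completion may report a packet passed to the stack.
 */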
1241*4882a593Smuzhiyun static int qede_rx_process_tpa_cqe(struct qede_dev *edev,
1242*4882a593Smuzhiyun struct qede_fastpath *fp,
1243*4882a593Smuzhiyun struct qede_rx_queue *rxq,
1244*4882a593Smuzhiyun union eth_rx_cqe *cqe,
1245*4882a593Smuzhiyun enum eth_rx_cqe_type type)
1246*4882a593Smuzhiyun {
1247*4882a593Smuzhiyun switch (type) {
1248*4882a593Smuzhiyun case ETH_RX_CQE_TYPE_TPA_START:
1249*4882a593Smuzhiyun qede_tpa_start(edev, rxq, &cqe->fast_path_tpa_start);
1250*4882a593Smuzhiyun return 0;
1251*4882a593Smuzhiyun case ETH_RX_CQE_TYPE_TPA_CONT:
1252*4882a593Smuzhiyun qede_tpa_cont(edev, rxq, &cqe->fast_path_tpa_cont);
1253*4882a593Smuzhiyun return 0;
1254*4882a593Smuzhiyun case ETH_RX_CQE_TYPE_TPA_END:
1255*4882a593Smuzhiyun return qede_tpa_end(edev, fp, &cqe->fast_path_tpa_end);
1256*4882a593Smuzhiyun default:
1257*4882a593Smuzhiyun return 0;
1258*4882a593Smuzhiyun }
1259*4882a593Smuzhiyun }
1260*4882a593Smuzhiyun
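/* Process a single Rx completion. Returns 1 if a packet was passed to the
 * networking stack, 0 otherwise (slowpath events, XDP-consumed frames,
 * allocation failures and TPA start/continue completions).
 */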
1261*4882a593Smuzhiyun static int qede_rx_process_cqe(struct qede_dev *edev,
1262*4882a593Smuzhiyun struct qede_fastpath *fp,
1263*4882a593Smuzhiyun struct qede_rx_queue *rxq)
1264*4882a593Smuzhiyun {
1265*4882a593Smuzhiyun struct bpf_prog *xdp_prog = READ_ONCE(rxq->xdp_prog);
1266*4882a593Smuzhiyun struct eth_fast_path_rx_reg_cqe *fp_cqe;
1267*4882a593Smuzhiyun u16 len, pad, bd_cons_idx, parse_flag;
1268*4882a593Smuzhiyun enum eth_rx_cqe_type cqe_type;
1269*4882a593Smuzhiyun union eth_rx_cqe *cqe;
1270*4882a593Smuzhiyun struct sw_rx_data *bd;
1271*4882a593Smuzhiyun struct sk_buff *skb;
1272*4882a593Smuzhiyun __le16 flags;
1273*4882a593Smuzhiyun u8 csum_flag;
1274*4882a593Smuzhiyun
1275*4882a593Smuzhiyun /* Get the CQE from the completion ring */
1276*4882a593Smuzhiyun cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);
1277*4882a593Smuzhiyun cqe_type = cqe->fast_path_regular.type;
1278*4882a593Smuzhiyun
1279*4882a593Smuzhiyun /* Process an unlikely slowpath event */
1280*4882a593Smuzhiyun if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
1281*4882a593Smuzhiyun struct eth_slow_path_rx_cqe *sp_cqe;
1282*4882a593Smuzhiyun
1283*4882a593Smuzhiyun sp_cqe = (struct eth_slow_path_rx_cqe *)cqe;
1284*4882a593Smuzhiyun edev->ops->eth_cqe_completion(edev->cdev, fp->id, sp_cqe);
1285*4882a593Smuzhiyun return 0;
1286*4882a593Smuzhiyun }
1287*4882a593Smuzhiyun
1288*4882a593Smuzhiyun /* Handle TPA cqes */
1289*4882a593Smuzhiyun if (cqe_type != ETH_RX_CQE_TYPE_REGULAR)
1290*4882a593Smuzhiyun return qede_rx_process_tpa_cqe(edev, fp, rxq, cqe, cqe_type);
1291*4882a593Smuzhiyun
1292*4882a593Smuzhiyun /* Get the data from the SW ring; consume it only once it's evident
1293*4882a593Smuzhiyun * we won't recycle it.
1294*4882a593Smuzhiyun */
1295*4882a593Smuzhiyun bd_cons_idx = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
1296*4882a593Smuzhiyun bd = &rxq->sw_rx_ring[bd_cons_idx];
1297*4882a593Smuzhiyun
1298*4882a593Smuzhiyun fp_cqe = &cqe->fast_path_regular;
1299*4882a593Smuzhiyun len = le16_to_cpu(fp_cqe->len_on_first_bd);
1300*4882a593Smuzhiyun pad = fp_cqe->placement_offset + rxq->rx_headroom;
1301*4882a593Smuzhiyun
1302*4882a593Smuzhiyun /* Run eBPF program if one is attached */
1303*4882a593Smuzhiyun if (xdp_prog)
1304*4882a593Smuzhiyun if (!qede_rx_xdp(edev, fp, rxq, xdp_prog, bd, fp_cqe,
1305*4882a593Smuzhiyun &pad, &len))
1306*4882a593Smuzhiyun return 0;
1307*4882a593Smuzhiyun
1308*4882a593Smuzhiyun /* If this is an error packet, account for it */
1309*4882a593Smuzhiyun flags = cqe->fast_path_regular.pars_flags.flags;
1310*4882a593Smuzhiyun parse_flag = le16_to_cpu(flags);
1311*4882a593Smuzhiyun
1312*4882a593Smuzhiyun csum_flag = qede_check_csum(parse_flag);
1313*4882a593Smuzhiyun if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
1314*4882a593Smuzhiyun if (qede_pkt_is_ip_fragmented(fp_cqe, parse_flag))
1315*4882a593Smuzhiyun rxq->rx_ip_frags++;
1316*4882a593Smuzhiyun else
1317*4882a593Smuzhiyun rxq->rx_hw_errors++;
1318*4882a593Smuzhiyun }
1319*4882a593Smuzhiyun
1320*4882a593Smuzhiyun /* Basic validation passed; prepare an SKB. On success this also
1321*4882a593Smuzhiyun * guarantees that the first BD is finally consumed.
1322*4882a593Smuzhiyun */
1323*4882a593Smuzhiyun skb = qede_rx_build_skb(edev, rxq, bd, len, pad);
1324*4882a593Smuzhiyun if (!skb) {
1325*4882a593Smuzhiyun rxq->rx_alloc_errors++;
1326*4882a593Smuzhiyun qede_recycle_rx_bd_ring(rxq, fp_cqe->bd_num);
1327*4882a593Smuzhiyun return 0;
1328*4882a593Smuzhiyun }
1329*4882a593Smuzhiyun
1330*4882a593Smuzhiyun /* For a jumbo packet, several PAGE_SIZE'd buffers are pointed to
1331*4882a593Smuzhiyun * by a single CQE.
1332*4882a593Smuzhiyun */
1333*4882a593Smuzhiyun if (fp_cqe->bd_num > 1) {
1334*4882a593Smuzhiyun u16 unmapped_frags = qede_rx_build_jumbo(edev, rxq, skb,
1335*4882a593Smuzhiyun fp_cqe, len);
1336*4882a593Smuzhiyun
1337*4882a593Smuzhiyun if (unlikely(unmapped_frags > 0)) {
1338*4882a593Smuzhiyun qede_recycle_rx_bd_ring(rxq, unmapped_frags);
1339*4882a593Smuzhiyun dev_kfree_skb_any(skb);
1340*4882a593Smuzhiyun return 0;
1341*4882a593Smuzhiyun }
1342*4882a593Smuzhiyun }
1343*4882a593Smuzhiyun
1344*4882a593Smuzhiyun /* The SKB contains all the data. Now prepare meta-magic */
1345*4882a593Smuzhiyun skb->protocol = eth_type_trans(skb, edev->ndev);
1346*4882a593Smuzhiyun qede_get_rxhash(skb, fp_cqe->bitfields, fp_cqe->rss_hash);
1347*4882a593Smuzhiyun qede_set_skb_csum(skb, csum_flag);
1348*4882a593Smuzhiyun skb_record_rx_queue(skb, rxq->rxq_id);
1349*4882a593Smuzhiyun qede_ptp_record_rx_ts(edev, cqe, skb);
1350*4882a593Smuzhiyun
1351*4882a593Smuzhiyun /* SKB is prepared - pass it to stack */
1352*4882a593Smuzhiyun qede_skb_receive(edev, fp, rxq, skb, le16_to_cpu(fp_cqe->vlan_tag));
1353*4882a593Smuzhiyun
1354*4882a593Smuzhiyun return 1;
1355*4882a593Smuzhiyun }
1356*4882a593Smuzhiyun
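/* NAPI Rx handler: consume completed CQEs up to the given budget, refill the
 * Rx ring with replacement buffers and update the producers towards HW.
 */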
1357*4882a593Smuzhiyun static int qede_rx_int(struct qede_fastpath *fp, int budget)
1358*4882a593Smuzhiyun {
1359*4882a593Smuzhiyun struct qede_rx_queue *rxq = fp->rxq;
1360*4882a593Smuzhiyun struct qede_dev *edev = fp->edev;
1361*4882a593Smuzhiyun int work_done = 0, rcv_pkts = 0;
1362*4882a593Smuzhiyun u16 hw_comp_cons, sw_comp_cons;
1363*4882a593Smuzhiyun
1364*4882a593Smuzhiyun hw_comp_cons = le16_to_cpu(*rxq->hw_cons_ptr);
1365*4882a593Smuzhiyun sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1366*4882a593Smuzhiyun
1367*4882a593Smuzhiyun /* Memory barrier to prevent the CPU from speculatively reading the
1368*4882a593Smuzhiyun * CQE/BD in the while-loop before reading hw_comp_cons. Otherwise, if
1369*4882a593Smuzhiyun * the CQE is read before FW writes it, and FW then writes the CQE and
1370*4882a593Smuzhiyun * SB before the CPU reads hw_comp_cons, a stale CQE would be used.
1371*4882a593Smuzhiyun */
1372*4882a593Smuzhiyun rmb();
1373*4882a593Smuzhiyun
1374*4882a593Smuzhiyun /* Loop to complete all indicated BDs */
1375*4882a593Smuzhiyun while ((sw_comp_cons != hw_comp_cons) && (work_done < budget)) {
1376*4882a593Smuzhiyun rcv_pkts += qede_rx_process_cqe(edev, fp, rxq);
1377*4882a593Smuzhiyun qed_chain_recycle_consumed(&rxq->rx_comp_ring);
1378*4882a593Smuzhiyun sw_comp_cons = qed_chain_get_cons_idx(&rxq->rx_comp_ring);
1379*4882a593Smuzhiyun work_done++;
1380*4882a593Smuzhiyun }
1381*4882a593Smuzhiyun
1382*4882a593Smuzhiyun rxq->rcv_pkts += rcv_pkts;
1383*4882a593Smuzhiyun
1384*4882a593Smuzhiyun /* Allocate replacement buffers */
1385*4882a593Smuzhiyun while (rxq->num_rx_buffers - rxq->filled_buffers)
1386*4882a593Smuzhiyun if (qede_alloc_rx_buffer(rxq, false))
1387*4882a593Smuzhiyun break;
1388*4882a593Smuzhiyun
1389*4882a593Smuzhiyun /* Update producers */
1390*4882a593Smuzhiyun qede_update_rx_prod(edev, rxq);
1391*4882a593Smuzhiyun
1392*4882a593Smuzhiyun return work_done;
1393*4882a593Smuzhiyun }
1394*4882a593Smuzhiyun
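/* Check whether any of the fastpath's Rx, XDP or Tx rings still has pending
 * work after refreshing the status-block indices.
 */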
1395*4882a593Smuzhiyun static bool qede_poll_is_more_work(struct qede_fastpath *fp)
1396*4882a593Smuzhiyun {
1397*4882a593Smuzhiyun qed_sb_update_sb_idx(fp->sb_info);
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun /* *_has_*_work() reads the status block, thus we need to ensure that
1400*4882a593Smuzhiyun * status block indices have been actually read (qed_sb_update_sb_idx)
1401*4882a593Smuzhiyun * prior to this check (*_has_*_work) so that we won't write the
1402*4882a593Smuzhiyun * "newer" value of the status block to HW (if there was a DMA right
1403*4882a593Smuzhiyun * after qede_has_rx_work and if there is no rmb, the memory reading
1404*4882a593Smuzhiyun * (qed_sb_update_sb_idx) may be postponed to right before *_ack_sb).
1405*4882a593Smuzhiyun * In this case there will never be another interrupt until there is
1406*4882a593Smuzhiyun * another update of the status block, while there is still unhandled
1407*4882a593Smuzhiyun * work.
1408*4882a593Smuzhiyun */
1409*4882a593Smuzhiyun rmb();
1410*4882a593Smuzhiyun
1411*4882a593Smuzhiyun if (likely(fp->type & QEDE_FASTPATH_RX))
1412*4882a593Smuzhiyun if (qede_has_rx_work(fp->rxq))
1413*4882a593Smuzhiyun return true;
1414*4882a593Smuzhiyun
1415*4882a593Smuzhiyun if (fp->type & QEDE_FASTPATH_XDP)
1416*4882a593Smuzhiyun if (qede_txq_has_work(fp->xdp_tx))
1417*4882a593Smuzhiyun return true;
1418*4882a593Smuzhiyun
1419*4882a593Smuzhiyun if (likely(fp->type & QEDE_FASTPATH_TX)) {
1420*4882a593Smuzhiyun int cos;
1421*4882a593Smuzhiyun
1422*4882a593Smuzhiyun for_each_cos_in_txq(fp->edev, cos) {
1423*4882a593Smuzhiyun if (qede_txq_has_work(&fp->txq[cos]))
1424*4882a593Smuzhiyun return true;
1425*4882a593Smuzhiyun }
1426*4882a593Smuzhiyun }
1427*4882a593Smuzhiyun
1428*4882a593Smuzhiyun return false;
1429*4882a593Smuzhiyun }
1430*4882a593Smuzhiyun
1431*4882a593Smuzhiyun /*********************
1432*4882a593Smuzhiyun * NDO & API related *
1433*4882a593Smuzhiyun *********************/
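/* NAPI poll: complete regular and XDP Tx work first, then process Rx within
 * the budget. Interrupts are re-armed only when no further work is pending;
 * XDP transmissions and redirects done during the run are flushed at the end.
 */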
1434*4882a593Smuzhiyun int qede_poll(struct napi_struct *napi, int budget)
1435*4882a593Smuzhiyun {
1436*4882a593Smuzhiyun struct qede_fastpath *fp = container_of(napi, struct qede_fastpath,
1437*4882a593Smuzhiyun napi);
1438*4882a593Smuzhiyun struct qede_dev *edev = fp->edev;
1439*4882a593Smuzhiyun int rx_work_done = 0;
1440*4882a593Smuzhiyun u16 xdp_prod;
1441*4882a593Smuzhiyun
1442*4882a593Smuzhiyun fp->xdp_xmit = 0;
1443*4882a593Smuzhiyun
1444*4882a593Smuzhiyun if (likely(fp->type & QEDE_FASTPATH_TX)) {
1445*4882a593Smuzhiyun int cos;
1446*4882a593Smuzhiyun
1447*4882a593Smuzhiyun for_each_cos_in_txq(fp->edev, cos) {
1448*4882a593Smuzhiyun if (qede_txq_has_work(&fp->txq[cos]))
1449*4882a593Smuzhiyun qede_tx_int(edev, &fp->txq[cos]);
1450*4882a593Smuzhiyun }
1451*4882a593Smuzhiyun }
1452*4882a593Smuzhiyun
1453*4882a593Smuzhiyun if ((fp->type & QEDE_FASTPATH_XDP) && qede_txq_has_work(fp->xdp_tx))
1454*4882a593Smuzhiyun qede_xdp_tx_int(edev, fp->xdp_tx);
1455*4882a593Smuzhiyun
1456*4882a593Smuzhiyun rx_work_done = (likely(fp->type & QEDE_FASTPATH_RX) &&
1457*4882a593Smuzhiyun qede_has_rx_work(fp->rxq)) ?
1458*4882a593Smuzhiyun qede_rx_int(fp, budget) : 0;
1459*4882a593Smuzhiyun if (rx_work_done < budget) {
1460*4882a593Smuzhiyun if (!qede_poll_is_more_work(fp)) {
1461*4882a593Smuzhiyun napi_complete_done(napi, rx_work_done);
1462*4882a593Smuzhiyun
1463*4882a593Smuzhiyun /* Update and reenable interrupts */
1464*4882a593Smuzhiyun qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
1465*4882a593Smuzhiyun } else {
1466*4882a593Smuzhiyun rx_work_done = budget;
1467*4882a593Smuzhiyun }
1468*4882a593Smuzhiyun }
1469*4882a593Smuzhiyun
1470*4882a593Smuzhiyun if (fp->xdp_xmit & QEDE_XDP_TX) {
1471*4882a593Smuzhiyun xdp_prod = qed_chain_get_prod_idx(&fp->xdp_tx->tx_pbl);
1472*4882a593Smuzhiyun
1473*4882a593Smuzhiyun fp->xdp_tx->tx_db.data.bd_prod = cpu_to_le16(xdp_prod);
1474*4882a593Smuzhiyun qede_update_tx_producer(fp->xdp_tx);
1475*4882a593Smuzhiyun }
1476*4882a593Smuzhiyun
1477*4882a593Smuzhiyun if (fp->xdp_xmit & QEDE_XDP_REDIRECT)
1478*4882a593Smuzhiyun xdp_do_flush_map();
1479*4882a593Smuzhiyun
1480*4882a593Smuzhiyun return rx_work_done;
1481*4882a593Smuzhiyun }
1482*4882a593Smuzhiyun
1483*4882a593Smuzhiyun irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie)
1484*4882a593Smuzhiyun {
1485*4882a593Smuzhiyun struct qede_fastpath *fp = fp_cookie;
1486*4882a593Smuzhiyun
1487*4882a593Smuzhiyun qed_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0 /*do not update*/);
1488*4882a593Smuzhiyun
1489*4882a593Smuzhiyun napi_schedule_irqoff(&fp->napi);
1490*4882a593Smuzhiyun return IRQ_HANDLED;
1491*4882a593Smuzhiyun }
1492*4882a593Smuzhiyun
1493*4882a593Smuzhiyun /* Main transmit function */
1494*4882a593Smuzhiyun netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1495*4882a593Smuzhiyun {
1496*4882a593Smuzhiyun struct qede_dev *edev = netdev_priv(ndev);
1497*4882a593Smuzhiyun struct netdev_queue *netdev_txq;
1498*4882a593Smuzhiyun struct qede_tx_queue *txq;
1499*4882a593Smuzhiyun struct eth_tx_1st_bd *first_bd;
1500*4882a593Smuzhiyun struct eth_tx_2nd_bd *second_bd = NULL;
1501*4882a593Smuzhiyun struct eth_tx_3rd_bd *third_bd = NULL;
1502*4882a593Smuzhiyun struct eth_tx_bd *tx_data_bd = NULL;
1503*4882a593Smuzhiyun u16 txq_index, val = 0;
1504*4882a593Smuzhiyun u8 nbd = 0;
1505*4882a593Smuzhiyun dma_addr_t mapping;
1506*4882a593Smuzhiyun int rc, frag_idx = 0, ipv6_ext = 0;
1507*4882a593Smuzhiyun u8 xmit_type;
1508*4882a593Smuzhiyun u16 idx;
1509*4882a593Smuzhiyun u16 hlen;
1510*4882a593Smuzhiyun bool data_split = false;
1511*4882a593Smuzhiyun
1512*4882a593Smuzhiyun /* Get tx-queue context and netdev index */
1513*4882a593Smuzhiyun txq_index = skb_get_queue_mapping(skb);
1514*4882a593Smuzhiyun WARN_ON(txq_index >= QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc);
1515*4882a593Smuzhiyun txq = QEDE_NDEV_TXQ_ID_TO_TXQ(edev, txq_index);
1516*4882a593Smuzhiyun netdev_txq = netdev_get_tx_queue(ndev, txq_index);
1517*4882a593Smuzhiyun
1518*4882a593Smuzhiyun WARN_ON(qed_chain_get_elem_left(&txq->tx_pbl) < (MAX_SKB_FRAGS + 1));
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun xmit_type = qede_xmit_type(skb, &ipv6_ext);
1521*4882a593Smuzhiyun
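/* If the SKB spans more fragments than the HW can chain for a single packet,
 * try to linearize it first; drop it if linearization fails.
 */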
1522*4882a593Smuzhiyun #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET)
1523*4882a593Smuzhiyun if (qede_pkt_req_lin(skb, xmit_type)) {
1524*4882a593Smuzhiyun if (skb_linearize(skb)) {
1525*4882a593Smuzhiyun txq->tx_mem_alloc_err++;
1526*4882a593Smuzhiyun
1527*4882a593Smuzhiyun dev_kfree_skb_any(skb);
1528*4882a593Smuzhiyun return NETDEV_TX_OK;
1529*4882a593Smuzhiyun }
1530*4882a593Smuzhiyun }
1531*4882a593Smuzhiyun #endif
1532*4882a593Smuzhiyun
1533*4882a593Smuzhiyun /* Fill the entry in the SW ring and the BDs in the FW ring */
1534*4882a593Smuzhiyun idx = txq->sw_tx_prod;
1535*4882a593Smuzhiyun txq->sw_tx_ring.skbs[idx].skb = skb;
1536*4882a593Smuzhiyun first_bd = (struct eth_tx_1st_bd *)
1537*4882a593Smuzhiyun qed_chain_produce(&txq->tx_pbl);
1538*4882a593Smuzhiyun memset(first_bd, 0, sizeof(*first_bd));
1539*4882a593Smuzhiyun first_bd->data.bd_flags.bitfields =
1540*4882a593Smuzhiyun 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1541*4882a593Smuzhiyun
1542*4882a593Smuzhiyun if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
1543*4882a593Smuzhiyun qede_ptp_tx_ts(edev, skb);
1544*4882a593Smuzhiyun
1545*4882a593Smuzhiyun /* Map skb linear data for DMA and set in the first BD */
1546*4882a593Smuzhiyun mapping = dma_map_single(txq->dev, skb->data,
1547*4882a593Smuzhiyun skb_headlen(skb), DMA_TO_DEVICE);
1548*4882a593Smuzhiyun if (unlikely(dma_mapping_error(txq->dev, mapping))) {
1549*4882a593Smuzhiyun DP_NOTICE(edev, "SKB mapping failed\n");
1550*4882a593Smuzhiyun qede_free_failed_tx_pkt(txq, first_bd, 0, false);
1551*4882a593Smuzhiyun qede_update_tx_producer(txq);
1552*4882a593Smuzhiyun return NETDEV_TX_OK;
1553*4882a593Smuzhiyun }
1554*4882a593Smuzhiyun nbd++;
1555*4882a593Smuzhiyun BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));
1556*4882a593Smuzhiyun
1557*4882a593Smuzhiyun /* In case there is IPv6 with extension headers or LSO we need 2nd and
1558*4882a593Smuzhiyun * 3rd BDs.
1559*4882a593Smuzhiyun */
1560*4882a593Smuzhiyun if (unlikely((xmit_type & XMIT_LSO) | ipv6_ext)) {
1561*4882a593Smuzhiyun second_bd = (struct eth_tx_2nd_bd *)
1562*4882a593Smuzhiyun qed_chain_produce(&txq->tx_pbl);
1563*4882a593Smuzhiyun memset(second_bd, 0, sizeof(*second_bd));
1564*4882a593Smuzhiyun
1565*4882a593Smuzhiyun nbd++;
1566*4882a593Smuzhiyun third_bd = (struct eth_tx_3rd_bd *)
1567*4882a593Smuzhiyun qed_chain_produce(&txq->tx_pbl);
1568*4882a593Smuzhiyun memset(third_bd, 0, sizeof(*third_bd));
1569*4882a593Smuzhiyun
1570*4882a593Smuzhiyun nbd++;
1571*4882a593Smuzhiyun /* We need to fill in additional data in second_bd... */
1572*4882a593Smuzhiyun tx_data_bd = (struct eth_tx_bd *)second_bd;
1573*4882a593Smuzhiyun }
1574*4882a593Smuzhiyun
1575*4882a593Smuzhiyun if (skb_vlan_tag_present(skb)) {
1576*4882a593Smuzhiyun first_bd->data.vlan = cpu_to_le16(skb_vlan_tag_get(skb));
1577*4882a593Smuzhiyun first_bd->data.bd_flags.bitfields |=
1578*4882a593Smuzhiyun 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
1579*4882a593Smuzhiyun }
1580*4882a593Smuzhiyun
1581*4882a593Smuzhiyun /* Fill the parsing flags & params according to the requested offload */
1582*4882a593Smuzhiyun if (xmit_type & XMIT_L4_CSUM) {
1583*4882a593Smuzhiyun /* We don't re-calculate IP checksum as it is already done by
1584*4882a593Smuzhiyun * the upper stack
1585*4882a593Smuzhiyun */
1586*4882a593Smuzhiyun first_bd->data.bd_flags.bitfields |=
1587*4882a593Smuzhiyun 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
1588*4882a593Smuzhiyun
1589*4882a593Smuzhiyun if (xmit_type & XMIT_ENC) {
1590*4882a593Smuzhiyun first_bd->data.bd_flags.bitfields |=
1591*4882a593Smuzhiyun 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1592*4882a593Smuzhiyun
1593*4882a593Smuzhiyun val |= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
1594*4882a593Smuzhiyun }
1595*4882a593Smuzhiyun
1596*4882a593Smuzhiyun /* Legacy FW had flipped behavior for this bit -
1597*4882a593Smuzhiyun * i.e., it had to be set to prevent FW from touching encapsulated
1598*4882a593Smuzhiyun * packets when it didn't need to.
1599*4882a593Smuzhiyun */
1600*4882a593Smuzhiyun if (unlikely(txq->is_legacy))
1601*4882a593Smuzhiyun val ^= (1 << ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT);
1602*4882a593Smuzhiyun
1603*4882a593Smuzhiyun /* If the packet is IPv6 with an extension header, indicate that
1604*4882a593Smuzhiyun * to FW and pass a few params, since the device cracker doesn't
1605*4882a593Smuzhiyun * support parsing IPv6 with extension header(s).
1606*4882a593Smuzhiyun */
1607*4882a593Smuzhiyun if (unlikely(ipv6_ext))
1608*4882a593Smuzhiyun qede_set_params_for_ipv6_ext(skb, second_bd, third_bd);
1609*4882a593Smuzhiyun }
1610*4882a593Smuzhiyun
1611*4882a593Smuzhiyun if (xmit_type & XMIT_LSO) {
1612*4882a593Smuzhiyun first_bd->data.bd_flags.bitfields |=
1613*4882a593Smuzhiyun (1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT);
1614*4882a593Smuzhiyun third_bd->data.lso_mss =
1615*4882a593Smuzhiyun cpu_to_le16(skb_shinfo(skb)->gso_size);
1616*4882a593Smuzhiyun
1617*4882a593Smuzhiyun if (unlikely(xmit_type & XMIT_ENC)) {
1618*4882a593Smuzhiyun first_bd->data.bd_flags.bitfields |=
1619*4882a593Smuzhiyun 1 << ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
1620*4882a593Smuzhiyun
1621*4882a593Smuzhiyun if (xmit_type & XMIT_ENC_GSO_L4_CSUM) {
1622*4882a593Smuzhiyun u8 tmp = ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
1623*4882a593Smuzhiyun
1624*4882a593Smuzhiyun first_bd->data.bd_flags.bitfields |= 1 << tmp;
1625*4882a593Smuzhiyun }
1626*4882a593Smuzhiyun hlen = qede_get_skb_hlen(skb, true);
1627*4882a593Smuzhiyun } else {
1628*4882a593Smuzhiyun first_bd->data.bd_flags.bitfields |=
1629*4882a593Smuzhiyun 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
1630*4882a593Smuzhiyun hlen = qede_get_skb_hlen(skb, false);
1631*4882a593Smuzhiyun }
1632*4882a593Smuzhiyun
1633*4882a593Smuzhiyun /* @@@TBD - re-check this if it ends up not being removed */
1634*4882a593Smuzhiyun third_bd->data.bitfields |=
1635*4882a593Smuzhiyun cpu_to_le16(1 << ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
1636*4882a593Smuzhiyun
1637*4882a593Smuzhiyun /* Make life easier for FW guys who can't deal with header and
1638*4882a593Smuzhiyun * data on same BD. If we need to split, use the second bd...
1639*4882a593Smuzhiyun */
1640*4882a593Smuzhiyun if (unlikely(skb_headlen(skb) > hlen)) {
1641*4882a593Smuzhiyun DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1642*4882a593Smuzhiyun "TSO split header size is %d (%x:%x)\n",
1643*4882a593Smuzhiyun first_bd->nbytes, first_bd->addr.hi,
1644*4882a593Smuzhiyun first_bd->addr.lo);
1645*4882a593Smuzhiyun
1646*4882a593Smuzhiyun mapping = HILO_U64(le32_to_cpu(first_bd->addr.hi),
1647*4882a593Smuzhiyun le32_to_cpu(first_bd->addr.lo)) +
1648*4882a593Smuzhiyun hlen;
1649*4882a593Smuzhiyun
1650*4882a593Smuzhiyun BD_SET_UNMAP_ADDR_LEN(tx_data_bd, mapping,
1651*4882a593Smuzhiyun le16_to_cpu(first_bd->nbytes) -
1652*4882a593Smuzhiyun hlen);
1653*4882a593Smuzhiyun
1654*4882a593Smuzhiyun /* this marks the BD as one that has no
1655*4882a593Smuzhiyun * individual mapping
1656*4882a593Smuzhiyun */
1657*4882a593Smuzhiyun txq->sw_tx_ring.skbs[idx].flags |= QEDE_TSO_SPLIT_BD;
1658*4882a593Smuzhiyun
1659*4882a593Smuzhiyun first_bd->nbytes = cpu_to_le16(hlen);
1660*4882a593Smuzhiyun
1661*4882a593Smuzhiyun tx_data_bd = (struct eth_tx_bd *)third_bd;
1662*4882a593Smuzhiyun data_split = true;
1663*4882a593Smuzhiyun }
1664*4882a593Smuzhiyun } else {
1665*4882a593Smuzhiyun if (unlikely(skb->len > ETH_TX_MAX_NON_LSO_PKT_LEN)) {
1666*4882a593Smuzhiyun DP_ERR(edev, "Unexpected non LSO skb length = 0x%x\n", skb->len);
1667*4882a593Smuzhiyun qede_free_failed_tx_pkt(txq, first_bd, 0, false);
1668*4882a593Smuzhiyun qede_update_tx_producer(txq);
1669*4882a593Smuzhiyun return NETDEV_TX_OK;
1670*4882a593Smuzhiyun }
1671*4882a593Smuzhiyun
1672*4882a593Smuzhiyun val |= ((skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK) <<
1673*4882a593Smuzhiyun ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT);
1674*4882a593Smuzhiyun }
1675*4882a593Smuzhiyun
1676*4882a593Smuzhiyun first_bd->data.bitfields = cpu_to_le16(val);
1677*4882a593Smuzhiyun
1678*4882a593Smuzhiyun /* Handle fragmented skb */
1679*4882a593Smuzhiyun /* Special handling for frags placed inside the 2nd and 3rd BDs */
1680*4882a593Smuzhiyun while (tx_data_bd && frag_idx < skb_shinfo(skb)->nr_frags) {
1681*4882a593Smuzhiyun rc = map_frag_to_bd(txq,
1682*4882a593Smuzhiyun &skb_shinfo(skb)->frags[frag_idx],
1683*4882a593Smuzhiyun tx_data_bd);
1684*4882a593Smuzhiyun if (rc) {
1685*4882a593Smuzhiyun qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1686*4882a593Smuzhiyun qede_update_tx_producer(txq);
1687*4882a593Smuzhiyun return NETDEV_TX_OK;
1688*4882a593Smuzhiyun }
1689*4882a593Smuzhiyun
1690*4882a593Smuzhiyun if (tx_data_bd == (struct eth_tx_bd *)second_bd)
1691*4882a593Smuzhiyun tx_data_bd = (struct eth_tx_bd *)third_bd;
1692*4882a593Smuzhiyun else
1693*4882a593Smuzhiyun tx_data_bd = NULL;
1694*4882a593Smuzhiyun
1695*4882a593Smuzhiyun frag_idx++;
1696*4882a593Smuzhiyun }
1697*4882a593Smuzhiyun
1698*4882a593Smuzhiyun /* Map the remaining frags into the 4th, 5th, ... BDs */
1699*4882a593Smuzhiyun for (; frag_idx < skb_shinfo(skb)->nr_frags; frag_idx++, nbd++) {
1700*4882a593Smuzhiyun tx_data_bd = (struct eth_tx_bd *)
1701*4882a593Smuzhiyun qed_chain_produce(&txq->tx_pbl);
1702*4882a593Smuzhiyun
1703*4882a593Smuzhiyun memset(tx_data_bd, 0, sizeof(*tx_data_bd));
1704*4882a593Smuzhiyun
1705*4882a593Smuzhiyun rc = map_frag_to_bd(txq,
1706*4882a593Smuzhiyun &skb_shinfo(skb)->frags[frag_idx],
1707*4882a593Smuzhiyun tx_data_bd);
1708*4882a593Smuzhiyun if (rc) {
1709*4882a593Smuzhiyun qede_free_failed_tx_pkt(txq, first_bd, nbd, data_split);
1710*4882a593Smuzhiyun qede_update_tx_producer(txq);
1711*4882a593Smuzhiyun return NETDEV_TX_OK;
1712*4882a593Smuzhiyun }
1713*4882a593Smuzhiyun }
1714*4882a593Smuzhiyun
1715*4882a593Smuzhiyun /* update the first BD with the actual num BDs */
1716*4882a593Smuzhiyun first_bd->data.nbds = nbd;
1717*4882a593Smuzhiyun
1718*4882a593Smuzhiyun netdev_tx_sent_queue(netdev_txq, skb->len);
1719*4882a593Smuzhiyun
1720*4882a593Smuzhiyun skb_tx_timestamp(skb);
1721*4882a593Smuzhiyun
1722*4882a593Smuzhiyun /* Advance the packet producer only now, just before sending, since
1723*4882a593Smuzhiyun * mapping of pages may fail.
1724*4882a593Smuzhiyun */
1725*4882a593Smuzhiyun txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
1726*4882a593Smuzhiyun
1727*4882a593Smuzhiyun /* 'next page' entries are counted in the producer value */
1728*4882a593Smuzhiyun txq->tx_db.data.bd_prod =
1729*4882a593Smuzhiyun cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
1730*4882a593Smuzhiyun
1731*4882a593Smuzhiyun if (!netdev_xmit_more() || netif_xmit_stopped(netdev_txq))
1732*4882a593Smuzhiyun qede_update_tx_producer(txq);
1733*4882a593Smuzhiyun
1734*4882a593Smuzhiyun if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
1735*4882a593Smuzhiyun < (MAX_SKB_FRAGS + 1))) {
1736*4882a593Smuzhiyun if (netdev_xmit_more())
1737*4882a593Smuzhiyun qede_update_tx_producer(txq);
1738*4882a593Smuzhiyun
1739*4882a593Smuzhiyun netif_tx_stop_queue(netdev_txq);
1740*4882a593Smuzhiyun txq->stopped_cnt++;
1741*4882a593Smuzhiyun DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1742*4882a593Smuzhiyun "Stop queue was called\n");
1743*4882a593Smuzhiyun /* paired memory barrier is in qede_tx_int(), we have to keep
1744*4882a593Smuzhiyun * ordering of set_bit() in netif_tx_stop_queue() and read of
1745*4882a593Smuzhiyun * fp->bd_tx_cons
1746*4882a593Smuzhiyun */
1747*4882a593Smuzhiyun smp_mb();
1748*4882a593Smuzhiyun
1749*4882a593Smuzhiyun if ((qed_chain_get_elem_left(&txq->tx_pbl) >=
1750*4882a593Smuzhiyun (MAX_SKB_FRAGS + 1)) &&
1751*4882a593Smuzhiyun (edev->state == QEDE_STATE_OPEN)) {
1752*4882a593Smuzhiyun netif_tx_wake_queue(netdev_txq);
1753*4882a593Smuzhiyun DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
1754*4882a593Smuzhiyun "Wake queue was called\n");
1755*4882a593Smuzhiyun }
1756*4882a593Smuzhiyun }
1757*4882a593Smuzhiyun
1758*4882a593Smuzhiyun return NETDEV_TX_OK;
1759*4882a593Smuzhiyun }
1760*4882a593Smuzhiyun
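/* Select a Tx queue: hash via netdev_pick_tx() and fold the result into the
 * total queue count (TSS queues * traffic classes). For example, with 8 TSS
 * queues and 3 TCs the returned index falls in the range 0..23.
 */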
1761*4882a593Smuzhiyun u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
1762*4882a593Smuzhiyun struct net_device *sb_dev)
1763*4882a593Smuzhiyun {
1764*4882a593Smuzhiyun struct qede_dev *edev = netdev_priv(dev);
1765*4882a593Smuzhiyun int total_txq;
1766*4882a593Smuzhiyun
1767*4882a593Smuzhiyun total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
1768*4882a593Smuzhiyun
1769*4882a593Smuzhiyun return QEDE_TSS_COUNT(edev) ?
1770*4882a593Smuzhiyun netdev_pick_tx(dev, skb, NULL) % total_txq : 0;
1771*4882a593Smuzhiyun }
1772*4882a593Smuzhiyun
1773*4882a593Smuzhiyun /* 8B udp header + 8B base tunnel header + 32B option length */
1774*4882a593Smuzhiyun #define QEDE_MAX_TUN_HDR_LEN 48
1775*4882a593Smuzhiyun
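/* Mask out checksum/GSO offload features for encapsulated packets the HW
 * cannot handle: oversized tunnel headers, unknown UDP tunnel ports and
 * IPIP tunnels.
 */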
1776*4882a593Smuzhiyun netdev_features_t qede_features_check(struct sk_buff *skb,
1777*4882a593Smuzhiyun struct net_device *dev,
1778*4882a593Smuzhiyun netdev_features_t features)
1779*4882a593Smuzhiyun {
1780*4882a593Smuzhiyun if (skb->encapsulation) {
1781*4882a593Smuzhiyun u8 l4_proto = 0;
1782*4882a593Smuzhiyun
1783*4882a593Smuzhiyun switch (vlan_get_protocol(skb)) {
1784*4882a593Smuzhiyun case htons(ETH_P_IP):
1785*4882a593Smuzhiyun l4_proto = ip_hdr(skb)->protocol;
1786*4882a593Smuzhiyun break;
1787*4882a593Smuzhiyun case htons(ETH_P_IPV6):
1788*4882a593Smuzhiyun l4_proto = ipv6_hdr(skb)->nexthdr;
1789*4882a593Smuzhiyun break;
1790*4882a593Smuzhiyun default:
1791*4882a593Smuzhiyun return features;
1792*4882a593Smuzhiyun }
1793*4882a593Smuzhiyun
1794*4882a593Smuzhiyun /* Disable offloads for GENEVE tunnels, as HW can't parse
1795*4882a593Smuzhiyun * a GENEVE header whose option length exceeds 32 bytes, and
1796*4882a593Smuzhiyun * disable offloads for ports which are not offloaded.
1797*4882a593Smuzhiyun */
1798*4882a593Smuzhiyun if (l4_proto == IPPROTO_UDP) {
1799*4882a593Smuzhiyun struct qede_dev *edev = netdev_priv(dev);
1800*4882a593Smuzhiyun u16 hdrlen, vxln_port, gnv_port;
1801*4882a593Smuzhiyun
1802*4882a593Smuzhiyun hdrlen = QEDE_MAX_TUN_HDR_LEN;
1803*4882a593Smuzhiyun vxln_port = edev->vxlan_dst_port;
1804*4882a593Smuzhiyun gnv_port = edev->geneve_dst_port;
1805*4882a593Smuzhiyun
1806*4882a593Smuzhiyun if ((skb_inner_mac_header(skb) -
1807*4882a593Smuzhiyun skb_transport_header(skb)) > hdrlen ||
1808*4882a593Smuzhiyun (ntohs(udp_hdr(skb)->dest) != vxln_port &&
1809*4882a593Smuzhiyun ntohs(udp_hdr(skb)->dest) != gnv_port))
1810*4882a593Smuzhiyun return features & ~(NETIF_F_CSUM_MASK |
1811*4882a593Smuzhiyun NETIF_F_GSO_MASK);
1812*4882a593Smuzhiyun } else if (l4_proto == IPPROTO_IPIP) {
1813*4882a593Smuzhiyun /* IPIP tunnels are unknown to the device, or at least unsupported natively;
1814*4882a593Smuzhiyun * offloads for them can't be done trivially, so disable them for such skbs.
1815*4882a593Smuzhiyun */
1816*4882a593Smuzhiyun return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
1817*4882a593Smuzhiyun }
1818*4882a593Smuzhiyun }
1819*4882a593Smuzhiyun
1820*4882a593Smuzhiyun return features;
1821*4882a593Smuzhiyun }