// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2019 Pensando Systems, Inc */

#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <net/ip6_checksum.h>

#include "ionic.h"
#include "ionic_lif.h"
#include "ionic_txrx.h"

static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg);

static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);

static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info);

static inline void ionic_txq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	DEBUG_STATS_TXQ_POST(q, ring_dbell);

	ionic_q_post(q, ring_dbell, cb_func, cb_arg);
}

static inline void ionic_rxq_post(struct ionic_queue *q, bool ring_dbell,
				  ionic_desc_cb cb_func, void *cb_arg)
{
	ionic_q_post(q, ring_dbell, cb_func, cb_arg);

	DEBUG_STATS_RX_BUFF_CNT(q);
}

static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q)
{
	return netdev_get_tx_queue(q->lif->netdev, q->index);
}

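/* Allocate an rx skb: a napi frags skb for page-based receives,
 * or a normal IP-aligned skb for small copybreak receives.
 */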
static struct sk_buff *ionic_rx_skb_alloc(struct ionic_queue *q,
					  unsigned int len, bool frags)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	netdev = lif->netdev;
	stats = &q->lif->rxqstats[q->index];

	if (frags)
		skb = napi_get_frags(&q_to_qcq(q)->napi);
	else
		skb = netdev_alloc_skb_ip_align(netdev, len);

	if (unlikely(!skb)) {
		net_warn_ratelimited("%s: SKB alloc failed on %s!\n",
				     netdev->name, q->name);
		stats->alloc_err++;
		return NULL;
	}

	return skb;
}

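/* Build a frag-based skb from the pages posted for this descriptor,
 * unmapping each page and attaching it to the skb as a fragment.
 */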
static struct sk_buff *ionic_rx_frags(struct ionic_queue *q,
				      struct ionic_desc_info *desc_info,
				      struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	unsigned int i;
	u16 frag_len;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	prefetch(page_address(page_info->page) + NET_IP_ALIGN);

	skb = ionic_rx_skb_alloc(q, len, true);
	if (unlikely(!skb))
		return NULL;

	i = comp->num_sg_elems + 1;
	do {
		if (unlikely(!page_info->page)) {
			struct napi_struct *napi = &q_to_qcq(q)->napi;

			napi->skb = NULL;
			dev_kfree_skb(skb);
			return NULL;
		}

		frag_len = min(len, (u16)PAGE_SIZE);
		len -= frag_len;

		dma_unmap_page(dev, dma_unmap_addr(page_info, dma_addr),
			       PAGE_SIZE, DMA_FROM_DEVICE);
		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
				page_info->page, 0, frag_len, PAGE_SIZE);
		page_info->page = NULL;
		page_info++;
		i--;
	} while (i > 0);

	return skb;
}

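/* For small packets, copy the received data into a freshly allocated
 * linear skb so the mapped page can be left in place and recycled.
 */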
static struct sk_buff *ionic_rx_copybreak(struct ionic_queue *q,
					  struct ionic_desc_info *desc_info,
					  struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_page_info *page_info;
	struct sk_buff *skb;
	u16 len;

	page_info = &desc_info->pages[0];
	len = le16_to_cpu(comp->len);

	skb = ionic_rx_skb_alloc(q, len, false);
	if (unlikely(!skb))
		return NULL;

	if (unlikely(!page_info->page)) {
		dev_kfree_skb(skb);
		return NULL;
	}

	dma_sync_single_for_cpu(dev, dma_unmap_addr(page_info, dma_addr),
				len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, page_address(page_info->page), len);
	dma_sync_single_for_device(dev, dma_unmap_addr(page_info, dma_addr),
				   len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, q->lif->netdev);

	return skb;
}

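/* Process one rx completion: build the skb (copybreak or frags), fill
 * in hash, checksum and VLAN info from the completion descriptor, and
 * hand the packet to the stack via GRO.
 */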
static void ionic_rx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_qcq *qcq = q_to_qcq(q);
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct sk_buff *skb;

	stats = q_to_rx_stats(q);
	netdev = q->lif->netdev;

	if (comp->status) {
		stats->dropped++;
		return;
	}

	stats->pkts++;
	stats->bytes += le16_to_cpu(comp->len);

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		skb = ionic_rx_copybreak(q, desc_info, cq_info);
	else
		skb = ionic_rx_frags(q, desc_info, cq_info);

	if (unlikely(!skb)) {
		stats->dropped++;
		return;
	}

	skb_record_rx_queue(skb, q->index);

	if (likely(netdev->features & NETIF_F_RXHASH)) {
		switch (comp->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
		case IONIC_PKT_TYPE_IPV4:
		case IONIC_PKT_TYPE_IPV6:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L3);
			break;
		case IONIC_PKT_TYPE_IPV4_TCP:
		case IONIC_PKT_TYPE_IPV6_TCP:
		case IONIC_PKT_TYPE_IPV4_UDP:
		case IONIC_PKT_TYPE_IPV6_UDP:
			skb_set_hash(skb, le32_to_cpu(comp->rss_hash),
				     PKT_HASH_TYPE_L4);
			break;
		}
	}

	if (likely(netdev->features & NETIF_F_RXCSUM) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC)) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = (__force __wsum)le16_to_cpu(comp->csum);
		stats->csum_complete++;
	} else {
		stats->csum_none++;
	}

	if (unlikely((comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_BAD) ||
		     (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)))
		stats->csum_error++;

	if (likely(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (comp->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN)) {
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       le16_to_cpu(comp->vlan_tci));
		stats->vlan_stripped++;
	}

	if (le16_to_cpu(comp->len) <= q->lif->rx_copybreak)
		napi_gro_receive(&qcq->napi, skb);
	else
		napi_gro_frags(&qcq->napi);
}

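/* Service a single rx completion: check the color bit and completion
 * index, then clean the matching queue entry.
 */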
static bool ionic_rx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_rxq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;

	if (!color_match(comp->pkt_type_color, cq->done_color))
		return false;

	/* check for empty queue */
	if (q->tail_idx == q->head_idx)
		return false;

	if (q->tail_idx != le16_to_cpu(comp->comp_index))
		return false;

	desc_info = &q->info[q->tail_idx];
	q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

	/* clean the related q entry, only one per cq completion */
	ionic_rx_clean(q, desc_info, cq_info, desc_info->cb_arg);

	desc_info->cb = NULL;
	desc_info->cb_arg = NULL;

	return true;
}

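/* Allocate a full page for an rx buffer and DMA-map it for the device;
 * on mapping failure the page is released and an error is returned.
 */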
static int ionic_rx_page_alloc(struct ionic_queue *q,
			       struct ionic_page_info *page_info)
{
	struct ionic_lif *lif = q->lif;
	struct ionic_rx_stats *stats;
	struct net_device *netdev;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;
	stats = q_to_rx_stats(q);

	if (unlikely(!page_info)) {
		net_err_ratelimited("%s: %s invalid page_info in alloc\n",
				    netdev->name, q->name);
		return -EINVAL;
	}

	page_info->page = dev_alloc_page();
	if (unlikely(!page_info->page)) {
		net_err_ratelimited("%s: %s page alloc failed\n",
				    netdev->name, q->name);
		stats->alloc_err++;
		return -ENOMEM;
	}

	page_info->dma_addr = dma_map_page(dev, page_info->page, 0, PAGE_SIZE,
					   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(dev, page_info->dma_addr))) {
		put_page(page_info->page);
		page_info->dma_addr = 0;
		page_info->page = NULL;
		net_err_ratelimited("%s: %s dma map failed\n",
				    netdev->name, q->name);
		stats->dma_map_err++;
		return -EIO;
	}

	return 0;
}

static void ionic_rx_page_free(struct ionic_queue *q,
			       struct ionic_page_info *page_info)
{
	struct ionic_lif *lif = q->lif;
	struct net_device *netdev;
	struct device *dev;

	netdev = lif->netdev;
	dev = lif->ionic->dev;

	if (unlikely(!page_info)) {
		net_err_ratelimited("%s: %s invalid page_info in free\n",
				    netdev->name, q->name);
		return;
	}

	if (unlikely(!page_info->page)) {
		net_err_ratelimited("%s: %s invalid page in free\n",
				    netdev->name, q->name);
		return;
	}

	dma_unmap_page(dev, page_info->dma_addr, PAGE_SIZE, DMA_FROM_DEVICE);

	put_page(page_info->page);
	page_info->dma_addr = 0;
	page_info->page = NULL;
}

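/* Refill the rx ring: for each free slot, post page buffers sized to
 * cover the MTU, using the main descriptor for the first page and SG
 * elements for any additional pages, then ring the doorbell once.
 */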
void ionic_rx_fill(struct ionic_queue *q)
{
	struct net_device *netdev = q->lif->netdev;
	struct ionic_desc_info *desc_info;
	struct ionic_page_info *page_info;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *sg_elem;
	struct ionic_rxq_desc *desc;
	unsigned int remain_len;
	unsigned int seg_len;
	unsigned int nfrags;
	unsigned int i, j;
	unsigned int len;

	len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
	nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;

	for (i = ionic_q_space_avail(q); i; i--) {
		remain_len = len;
		desc_info = &q->info[q->head_idx];
		desc = desc_info->desc;
		sg_desc = desc_info->sg_desc;
		page_info = &desc_info->pages[0];

		if (page_info->page) { /* recycle the buffer */
			ionic_rxq_post(q, false, ionic_rx_clean, NULL);
			continue;
		}

		/* fill main descriptor - pages[0] */
		desc->opcode = (nfrags > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
					      IONIC_RXQ_DESC_OPCODE_SIMPLE;
		desc_info->npages = nfrags;
		if (unlikely(ionic_rx_page_alloc(q, page_info))) {
			desc->addr = 0;
			desc->len = 0;
			return;
		}
		desc->addr = cpu_to_le64(page_info->dma_addr);
		seg_len = min_t(unsigned int, PAGE_SIZE, len);
		desc->len = cpu_to_le16(seg_len);
		remain_len -= seg_len;
		page_info++;

		/* fill sg descriptors - pages[1..n] */
		for (j = 0; j < nfrags - 1; j++) {
			if (page_info->page) /* recycle the sg buffer */
				continue;

			sg_elem = &sg_desc->elems[j];
			if (unlikely(ionic_rx_page_alloc(q, page_info))) {
				sg_elem->addr = 0;
				sg_elem->len = 0;
				return;
			}
			sg_elem->addr = cpu_to_le64(page_info->dma_addr);
			seg_len = min_t(unsigned int, PAGE_SIZE, remain_len);
			sg_elem->len = cpu_to_le16(seg_len);
			remain_len -= seg_len;
			page_info++;
		}

		ionic_rxq_post(q, false, ionic_rx_clean, NULL);
	}

	ionic_dbell_ring(q->lif->kern_dbpage, q->hw_type,
			 q->dbval | q->head_idx);
}

static void ionic_rx_fill_cb(void *arg)
{
	ionic_rx_fill(arg);
}

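/* Release all rx buffers still held by the queue, used when the queue
 * is torn down or reconfigured.
 */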
void ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;
	struct ionic_page_info *page_info;
	unsigned int i, j;

	for (i = 0; i < q->num_descs; i++) {
		desc_info = &q->info[i];
		for (j = 0; j < IONIC_RX_MAX_SG_ELEMS + 1; j++) {
			page_info = &desc_info->pages[j];
			if (page_info->page)
				ionic_rx_page_free(q, page_info);
		}

		desc_info->npages = 0;
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}
}

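/* Feed the per-queue packet and byte counts into the dynamic interrupt
 * moderation (net_dim) state machine when adaptive coalescing is on.
 */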
static void ionic_dim_update(struct ionic_qcq *qcq, int napi_mode)
{
	struct dim_sample dim_sample;
	struct ionic_lif *lif;
	unsigned int qi;
	u64 pkts, bytes;

	if (!qcq->intr.dim_coal_hw)
		return;

	lif = qcq->q.lif;
	qi = qcq->cq.bound_q->index;

	switch (napi_mode) {
	case IONIC_LIF_F_TX_DIM_INTR:
		pkts = lif->txqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes;
		break;
	case IONIC_LIF_F_RX_DIM_INTR:
		pkts = lif->rxqstats[qi].pkts;
		bytes = lif->rxqstats[qi].bytes;
		break;
	default:
		pkts = lif->txqstats[qi].pkts + lif->rxqstats[qi].pkts;
		bytes = lif->txqstats[qi].bytes + lif->rxqstats[qi].bytes;
		break;
	}

	dim_update_sample(qcq->cq.bound_intr->rearm_count,
			  pkts, bytes, &dim_sample);

	net_dim(&qcq->dim, dim_sample);
}

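/* NAPI poll handler for a tx-only completion queue: service tx
 * completions, then re-arm the interrupt once the budget is not
 * exhausted.
 */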
int ionic_tx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_tx_service, NULL, NULL);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_TX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

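/* NAPI poll handler for an rx-only completion queue: service rx
 * completions, refill the rx ring, and re-arm the interrupt when done.
 */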
int ionic_rx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *cq = napi_to_cq(napi);
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	u32 work_done = 0;
	u32 flags = 0;

	lif = cq->bound_q->lif;
	idev = &lif->ionic->idev;

	work_done = ionic_cq_service(cq, budget,
				     ionic_rx_service, NULL, NULL);

	if (work_done)
		ionic_rx_fill(cq->bound_q);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		ionic_dim_update(qcq, IONIC_LIF_F_RX_DIM_INTR);
		flags |= IONIC_INTR_CRED_UNMASK;
		cq->bound_intr->rearm_count++;
	}

	if (work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl,
				   cq->bound_intr->index,
				   work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, work_done);

	return work_done;
}

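/* NAPI poll handler for a shared tx/rx interrupt: service the paired
 * tx completion queue first, then rx, refill the rx ring, and return
 * interrupt credits for both.
 */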
int ionic_txrx_napi(struct napi_struct *napi, int budget)
{
	struct ionic_qcq *qcq = napi_to_qcq(napi);
	struct ionic_cq *rxcq = napi_to_cq(napi);
	unsigned int qi = rxcq->bound_q->index;
	struct ionic_dev *idev;
	struct ionic_lif *lif;
	struct ionic_cq *txcq;
	u32 rx_work_done = 0;
	u32 tx_work_done = 0;
	u32 flags = 0;

	lif = rxcq->bound_q->lif;
	idev = &lif->ionic->idev;
	txcq = &lif->txqcqs[qi]->cq;

	tx_work_done = ionic_cq_service(txcq, lif->tx_budget,
					ionic_tx_service, NULL, NULL);

	rx_work_done = ionic_cq_service(rxcq, budget,
					ionic_rx_service, NULL, NULL);
	if (rx_work_done)
		ionic_rx_fill_cb(rxcq->bound_q);

	if (rx_work_done < budget && napi_complete_done(napi, rx_work_done)) {
		ionic_dim_update(qcq, 0);
		flags |= IONIC_INTR_CRED_UNMASK;
		rxcq->bound_intr->rearm_count++;
	}

	if (rx_work_done || flags) {
		flags |= IONIC_INTR_CRED_RESET_COALESCE;
		ionic_intr_credits(idev->intr_ctrl, rxcq->bound_intr->index,
				   tx_work_done + rx_work_done, flags);
	}

	DEBUG_STATS_NAPI_POLL(qcq, rx_work_done);
	DEBUG_STATS_NAPI_POLL(qcq, tx_work_done);

	return rx_work_done;
}

static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
				      void *data, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA single map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
		return 0;
	}
	return dma_addr;
}

static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
				    const skb_frag_t *frag,
				    size_t offset, size_t len)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;

	dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_addr)) {
		net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
				     q->lif->netdev->name, q->name);
		stats->dma_map_err++;
	}
	return dma_addr;
}

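/* Undo the DMA mappings for a completed tx descriptor and its SG list,
 * free the skb, wake the subqueue if it was stopped, and update the
 * BQL completed-queue accounting.
 */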
static void ionic_tx_clean(struct ionic_queue *q,
			   struct ionic_desc_info *desc_info,
			   struct ionic_cq_info *cq_info,
			   void *cb_arg)
{
	struct ionic_txq_sg_desc *sg_desc = desc_info->sg_desc;
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_txq_desc *desc = desc_info->desc;
	struct device *dev = q->lif->ionic->dev;
	u8 opcode, flags, nsge;
	u16 queue_index;
	unsigned int i;
	u64 addr;

	decode_txq_desc_cmd(le64_to_cpu(desc->cmd),
			    &opcode, &flags, &nsge, &addr);

	/* use unmap_single only if either this is not TSO,
	 * or this is first descriptor of a TSO
	 */
	if (opcode != IONIC_TXQ_DESC_OPCODE_TSO ||
	    flags & IONIC_TXQ_DESC_FLAG_TSO_SOT)
		dma_unmap_single(dev, (dma_addr_t)addr,
				 le16_to_cpu(desc->len), DMA_TO_DEVICE);
	else
		dma_unmap_page(dev, (dma_addr_t)addr,
			       le16_to_cpu(desc->len), DMA_TO_DEVICE);

	for (i = 0; i < nsge; i++, elem++)
		dma_unmap_page(dev, (dma_addr_t)le64_to_cpu(elem->addr),
			       le16_to_cpu(elem->len), DMA_TO_DEVICE);

	if (cb_arg) {
		struct sk_buff *skb = cb_arg;
		u32 len = skb->len;

		queue_index = skb_get_queue_mapping(skb);
		if (unlikely(__netif_subqueue_stopped(q->lif->netdev,
						      queue_index))) {
			netif_wake_subqueue(q->lif->netdev, queue_index);
			q->wake++;
		}
		dev_kfree_skb_any(skb);
		stats->clean++;
		netdev_tx_completed_queue(q_to_ndq(q), 1, len);
	}
}

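/* Service a single tx completion: one completion may cover several
 * queue entries, so clean entries until the reported completion index
 * is reached.
 */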
static bool ionic_tx_service(struct ionic_cq *cq, struct ionic_cq_info *cq_info)
{
	struct ionic_txq_comp *comp = cq_info->cq_desc;
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *desc_info;
	u16 index;

	if (!color_match(comp->color, cq->done_color))
		return false;

	/* clean the related q entries, there could be
	 * several q entries completed for each cq completion
	 */
	do {
		desc_info = &q->info[q->tail_idx];
		index = q->tail_idx;
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, cq_info, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	} while (index != le16_to_cpu(comp->comp_index));

	return true;
}

void ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_dev *idev = &cq->lif->ionic->idev;
	u32 work_done;

	work_done = ionic_cq_service(cq, cq->num_descs,
				     ionic_tx_service, NULL, NULL);
	if (work_done)
		ionic_intr_credits(idev->intr_ctrl, cq->bound_intr->index,
				   work_done, IONIC_INTR_CRED_RESET_COALESCE);
}

void ionic_tx_empty(struct ionic_queue *q)
{
	struct ionic_desc_info *desc_info;

	/* walk the not completed tx entries, if any */
	while (q->head_idx != q->tail_idx) {
		desc_info = &q->info[q->tail_idx];
		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
		ionic_tx_clean(q, desc_info, NULL, desc_info->cb_arg);
		desc_info->cb = NULL;
		desc_info->cb_arg = NULL;
	}
}

static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		inner_ip_hdr(skb)->check = 0;
		inner_tcp_hdr(skb)->check =
			~csum_tcpudp_magic(inner_ip_hdr(skb)->saddr,
					   inner_ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		inner_tcp_hdr(skb)->check =
			~csum_ipv6_magic(&inner_ipv6_hdr(skb)->saddr,
					 &inner_ipv6_hdr(skb)->daddr,
					 0, IPPROTO_TCP, 0);
	}

	return 0;
}

static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb)
{
	int err;

	err = skb_cow_head(skb, 0);
	if (err)
		return err;

	if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
		ip_hdr(skb)->check = 0;
		tcp_hdr(skb)->check =
			~csum_tcpudp_magic(ip_hdr(skb)->saddr,
					   ip_hdr(skb)->daddr,
					   0, IPPROTO_TCP, 0);
	} else if (skb->protocol == cpu_to_be16(ETH_P_IPV6)) {
		tcp_v6_gso_csum_prep(skb);
	}

	return 0;
}

static void ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
			      struct sk_buff *skb,
			      dma_addr_t addr, u8 nsge, u16 len,
			      unsigned int hdrlen, unsigned int mss,
			      bool outer_csum,
			      u16 vlan_tci, bool has_vlan,
			      bool start, bool done)
{
	u8 flags = 0;
	u64 cmd;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= outer_csum ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, flags, nsge, addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(len);
	desc->vlan_tci = cpu_to_le16(vlan_tci);
	desc->hdr_len = cpu_to_le16(hdrlen);
	desc->mss = cpu_to_le16(mss);

	if (done) {
		skb_tx_timestamp(skb);
		netdev_tx_sent_queue(q_to_ndq(q), skb->len);
		ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);
	} else {
		ionic_txq_post(q, false, ionic_tx_clean, NULL);
	}
}

static struct ionic_txq_desc *ionic_tx_tso_next(struct ionic_queue *q,
						struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;

	*elem = sg_desc->elems;
	return desc;
}

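/* Build the chain of TSO descriptors for a GSO skb: preload the TCP
 * pseudo-header checksum, then chop the linear data and frags into
 * MSS-sized segments, posting a descriptor per segment.  On a mapping
 * failure the already-built descriptors are unwound.
 */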
static int ionic_tx_tso(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct ionic_desc_info *rewind_desc_info;
	struct device *dev = q->lif->ionic->dev;
	struct ionic_txq_sg_elem *elem;
	struct ionic_txq_desc *desc;
	unsigned int frag_left = 0;
	unsigned int offset = 0;
	u16 abort = q->head_idx;
	unsigned int len_left;
	dma_addr_t desc_addr;
	unsigned int hdrlen;
	unsigned int nfrags;
	unsigned int seglen;
	u64 total_bytes = 0;
	u64 total_pkts = 0;
	u16 rewind = abort;
	unsigned int left;
	unsigned int len;
	unsigned int mss;
	skb_frag_t *frag;
	bool start, done;
	bool outer_csum;
	dma_addr_t addr;
	bool has_vlan;
	u16 desc_len;
	u8 desc_nsge;
	u16 vlan_tci;
	bool encap;
	int err;

	mss = skb_shinfo(skb)->gso_size;
	nfrags = skb_shinfo(skb)->nr_frags;
	len_left = skb->len - skb_headlen(skb);
	outer_csum = (skb_shinfo(skb)->gso_type & SKB_GSO_GRE_CSUM) ||
		     (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
	has_vlan = !!skb_vlan_tag_present(skb);
	vlan_tci = skb_vlan_tag_get(skb);
	encap = skb->encapsulation;

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap)
		err = ionic_tx_tcp_inner_pseudo_csum(skb);
	else
		err = ionic_tx_tcp_pseudo_csum(skb);
	if (err)
		return err;

	if (encap)
		hdrlen = skb_inner_transport_header(skb) - skb->data +
			 inner_tcp_hdrlen(skb);
	else
		hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);

	seglen = hdrlen + mss;
	left = skb_headlen(skb);

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

	/* Chop skb->data up into desc segments */

	while (left > 0) {
		len = min(seglen, left);
		frag_left = seglen - len;
		desc_addr = ionic_tx_map_single(q, skb->data + offset, len);
		if (dma_mapping_error(dev, desc_addr))
			goto err_out_abort;
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (nfrags > 0 && frag_left > 0)
			continue;
		done = (nfrags == 0 && left == 0);
		ionic_tx_tso_post(q, desc, skb,
				  desc_addr, desc_nsge, desc_len,
				  hdrlen, mss,
				  outer_csum,
				  vlan_tci, has_vlan,
				  start, done);
		total_pkts++;
		total_bytes += start ? len : len + hdrlen;
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop skb frags into desc segments */

	for (frag = skb_shinfo(skb)->frags; len_left; frag++) {
		offset = 0;
		left = skb_frag_size(frag);
		len_left -= left;
		nfrags--;
		stats->frags++;

		while (left > 0) {
			if (frag_left > 0) {
				len = min(frag_left, left);
				frag_left -= len;
				addr = ionic_tx_map_frag(q, frag, offset, len);
				if (dma_mapping_error(dev, addr))
					goto err_out_abort;
				elem->addr = cpu_to_le64(addr);
				elem->len = cpu_to_le16(len);
				elem++;
				desc_nsge++;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			} else {
				len = min(mss, left);
				frag_left = mss - len;
				desc_addr = ionic_tx_map_frag(q, frag,
							      offset, len);
				if (dma_mapping_error(dev, desc_addr))
					goto err_out_abort;
				desc_len = len;
				desc_nsge = 0;
				left -= len;
				offset += len;
				if (nfrags > 0 && frag_left > 0)
					continue;
				done = (nfrags == 0 && left == 0);
				ionic_tx_tso_post(q, desc, skb, desc_addr,
						  desc_nsge, desc_len,
						  hdrlen, mss, outer_csum,
						  vlan_tci, has_vlan,
						  start, done);
				total_pkts++;
				total_bytes += start ? len : len + hdrlen;
				desc = ionic_tx_tso_next(q, &elem);
				start = false;
			}
		}
	}

	stats->pkts += total_pkts;
	stats->bytes += total_bytes;
	stats->tso++;
	stats->tso_bytes += total_bytes;

	return 0;

err_out_abort:
	while (rewind != q->head_idx) {
		rewind_desc_info = &q->info[rewind];
		ionic_tx_clean(q, rewind_desc_info, NULL, NULL);
		rewind = (rewind + 1) & (q->num_descs - 1);
	}
	q->head_idx = abort;

	return -ENOMEM;
}

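/* Set up the first tx descriptor for a CHECKSUM_PARTIAL skb, asking the
 * hardware to insert the checksum at the offset the stack provided.
 */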
static int ionic_tx_calc_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_PARTIAL,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	desc->csum_start = cpu_to_le16(skb_checksum_start_offset(skb));
	desc->csum_offset = cpu_to_le16(skb->csum_offset);
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	if (skb->csum_not_inet)
		stats->crc32_csum++;
	else
		stats->csum++;

	return 0;
}

static int ionic_tx_calc_no_csum(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_desc *desc = q->info[q->head_idx].txq_desc;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	bool has_vlan;
	u8 flags = 0;
	bool encap;
	u64 cmd;

	has_vlan = !!skb_vlan_tag_present(skb);
	encap = skb->encapsulation;

	dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_CSUM_NONE,
				  flags, skb_shinfo(skb)->nr_frags, dma_addr);
	desc->cmd = cpu_to_le64(cmd);
	desc->len = cpu_to_le16(skb_headlen(skb));
	if (has_vlan) {
		desc->vlan_tci = cpu_to_le16(skb_vlan_tag_get(skb));
		stats->vlan_inserted++;
	}

	stats->csum_none++;

	return 0;
}

static int ionic_tx_skb_frags(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_txq_sg_desc *sg_desc = q->info[q->head_idx].txq_sg_desc;
	unsigned int len_left = skb->len - skb_headlen(skb);
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	struct device *dev = q->lif->ionic->dev;
	dma_addr_t dma_addr;
	skb_frag_t *frag;
	u16 len;

	for (frag = skb_shinfo(skb)->frags; len_left; frag++, elem++) {
		len = skb_frag_size(frag);
		elem->len = cpu_to_le16(len);
		dma_addr = ionic_tx_map_frag(q, frag, 0, len);
		if (dma_mapping_error(dev, dma_addr))
			return -ENOMEM;
		elem->addr = cpu_to_le64(dma_addr);
		len_left -= len;
		stats->frags++;
	}

	return 0;
}

static int ionic_tx(struct ionic_queue *q, struct sk_buff *skb)
{
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int err;

	/* set up the initial descriptor */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		err = ionic_tx_calc_csum(q, skb);
	else
		err = ionic_tx_calc_no_csum(q, skb);
	if (err)
		return err;

	/* add frags */
	err = ionic_tx_skb_frags(q, skb);
	if (err)
		return err;

	skb_tx_timestamp(skb);
	stats->pkts++;
	stats->bytes += skb->len;

	netdev_tx_sent_queue(q_to_ndq(q), skb->len);
	ionic_txq_post(q, !netdev_xmit_more(), ionic_tx_clean, skb);

	return 0;
}

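/* Estimate how many descriptors this skb will need: one per gso_seg
 * for TSO, otherwise one; linearize the skb if it has more frags than
 * the SG descriptor can hold.
 */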
static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb)
{
	int sg_elems = q->lif->qtype_info[IONIC_QTYPE_TXQ].max_sg_elems;
	struct ionic_tx_stats *stats = q_to_tx_stats(q);
	int ndescs;
	int err;

	/* Each desc is mss long max, so a descriptor for each gso_seg */
	if (skb_is_gso(skb))
		ndescs = skb_shinfo(skb)->gso_segs;
	else
		ndescs = 1;

	if (skb_shinfo(skb)->nr_frags <= sg_elems)
		return ndescs;

	/* Too many frags, so linearize */
	err = skb_linearize(skb);
	if (err)
		return err;

	stats->linearize++;

	return ndescs;
}

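/* Stop the subqueue if there is not enough ring space for ndescs more
 * descriptors, re-checking once to close the race with ionic_tx_clean.
 */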
static int ionic_maybe_stop_tx(struct ionic_queue *q, int ndescs)
{
	int stopped = 0;

	if (unlikely(!ionic_q_has_space(q, ndescs))) {
		netif_stop_subqueue(q->lif->netdev, q->index);
		q->stop++;
		stopped = 1;

		/* Might race with ionic_tx_clean, check again */
		smp_rmb();
		if (ionic_q_has_space(q, ndescs)) {
			netif_wake_subqueue(q->lif->netdev, q->index);
			stopped = 0;
		}
	}

	return stopped;
}

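/* ndo_start_xmit entry point: drop packets while the LIF is down,
 * check for ring space, then hand the skb to the TSO or non-TSO path.
 */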
netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	u16 queue_index = skb_get_queue_mapping(skb);
	struct ionic_lif *lif = netdev_priv(netdev);
	struct ionic_queue *q;
	int ndescs;
	int err;

	if (unlikely(!test_bit(IONIC_LIF_F_UP, lif->state))) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (unlikely(queue_index >= lif->nxqs))
		queue_index = 0;
	q = &lif->txqcqs[queue_index]->q;

	ndescs = ionic_tx_descs_needed(q, skb);
	if (ndescs < 0)
		goto err_out_drop;

	if (unlikely(ionic_maybe_stop_tx(q, ndescs)))
		return NETDEV_TX_BUSY;

	if (skb_is_gso(skb))
		err = ionic_tx_tso(q, skb);
	else
		err = ionic_tx(q, skb);

	if (err)
		goto err_out_drop;

	/* Stop the queue if there aren't descriptors for the next packet.
	 * Since our SG lists per descriptor take care of most of the possible
	 * fragmentation, we don't need to have many descriptors available.
	 */
	ionic_maybe_stop_tx(q, 4);

	return NETDEV_TX_OK;

err_out_drop:
	q->stop++;
	q->drop++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}