// SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/cpumask.h>
#include <linux/if_vlan.h>
#include <asm/barrier.h>

#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_rx.h"
#include "hinic_dev.h"

#define RX_IRQ_NO_PENDING               0
#define RX_IRQ_NO_COALESC               0
#define RX_IRQ_NO_LLI_TIMER             0
#define RX_IRQ_NO_CREDIT                0
#define RX_IRQ_NO_RESEND_TIMER          0
#define HINIC_RX_BUFFER_WRITE           16

#define HINIC_RX_IPV6_PKT		7
#define LRO_PKT_HDR_LEN_IPV4		66
#define LRO_PKT_HDR_LEN_IPV6		86
#define LRO_REPLENISH_THLD		256

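/* Per-packet header length assumed for LRO byte accounting, presumably
 * covering the Ethernet + IP + TCP headers of each merged frame; the
 * IPv6 value is 20 bytes larger, matching the IPv6 vs IPv4 header size
 * difference. Used in rxq_recv() to credit rx_bytes for headers the
 * hardware stripped while coalescing.
 */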
#define LRO_PKT_HDR_LEN(cqe)		\
	(HINIC_GET_RX_PKT_TYPE(be32_to_cpu((cqe)->offload_type)) == \
	 HINIC_RX_IPV6_PKT ? LRO_PKT_HDR_LEN_IPV6 : LRO_PKT_HDR_LEN_IPV4)

/**
 * hinic_rxq_clean_stats - Clean the statistics of the specified queue
 * @rxq: Logical Rx Queue
 **/
void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_update_begin(&rxq_stats->syncp);
	rxq_stats->pkts = 0;
	rxq_stats->bytes = 0;
	rxq_stats->errors = 0;
	rxq_stats->csum_errors = 0;
	rxq_stats->other_errors = 0;
	u64_stats_update_end(&rxq_stats->syncp);
}

/**
 * hinic_rxq_get_stats - get statistics of the Rx Queue
 * @rxq: Logical Rx Queue
 * @stats: return updated stats here
 **/
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&rxq_stats->syncp);
		stats->pkts = rxq_stats->pkts;
		stats->bytes = rxq_stats->bytes;
		stats->errors = rxq_stats->csum_errors +
				rxq_stats->other_errors;
		stats->csum_errors = rxq_stats->csum_errors;
		stats->other_errors = rxq_stats->other_errors;
	} while (u64_stats_fetch_retry_irq(&rxq_stats->syncp, start));
}

/**
 * rxq_stats_init - Initialize the statistics of the specified queue
 * @rxq: Logical Rx Queue
 **/
static void rxq_stats_init(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_init(&rxq_stats->syncp);
	hinic_rxq_clean_stats(rxq);
}

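/**
 * rx_csum - set the skb checksum state from the hardware checksum result
 * @rxq: rx queue the packet arrived on
 * @status: CQE status word carrying the CSUM_ERR field
 * @skb: received skb
 **/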
static void rx_csum(struct hinic_rxq *rxq, u32 status,
		    struct sk_buff *skb)
{
	struct net_device *netdev = rxq->netdev;
	u32 csum_err;

	csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	if (!csum_err) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	} else {
		if (!(csum_err & (HINIC_RX_CSUM_HW_CHECK_NONE |
			HINIC_RX_CSUM_IPSU_OTHER_ERR)))
			rxq->rxq_stats.csum_errors++;
		skb->ip_summed = CHECKSUM_NONE;
	}
}

/**
 * rx_alloc_skb - allocate skb and map it to dma address
 * @rxq: rx queue
 * @dma_addr: returned dma address for the skb
 *
 * Return the allocated skb, or NULL on failure
 **/
static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
				    dma_addr_t *dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct sk_buff *skb;
	dma_addr_t addr;
	int err;

	skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
	if (!skb) {
		netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n");
		return NULL;
	}

	addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
			      DMA_FROM_DEVICE);
	err = dma_mapping_error(&pdev->dev, addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
		goto err_rx_map;
	}

	*dma_addr = addr;
	return skb;

err_rx_map:
	dev_kfree_skb_any(skb);
	return NULL;
}

/**
 * rx_unmap_skb - unmap the dma address of the skb
 * @rxq: rx queue
 * @dma_addr: dma address of the skb
 **/
static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;

	dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
			 DMA_FROM_DEVICE);
}

/**
 * rx_free_skb - unmap and free skb
 * @rxq: rx queue
 * @skb: skb to free
 * @dma_addr: dma address of the skb
 **/
static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
			dma_addr_t dma_addr)
{
	rx_unmap_skb(rxq, dma_addr);
	dev_kfree_skb_any(skb);
}

/**
 * rx_alloc_pkts - allocate pkts in rx queue
 * @rxq: rx queue
 *
 * Return number of skbs allocated
 **/
static int rx_alloc_pkts(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq_wqe *rq_wqe;
	unsigned int free_wqebbs;
	struct hinic_sge sge;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	u16 prod_idx;
	int i;

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);

	/* Limit the allocation chunks */
	if (free_wqebbs > nic_dev->rx_weight)
		free_wqebbs = nic_dev->rx_weight;

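	/* One buffer per free WQE: allocate a DMA-mapped skb, claim the
	 * WQE at the producer index and write the buffer SGE into it.
	 * The producer index is published to hardware only once, for the
	 * whole batch, under the wmb() below.
	 */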
	for (i = 0; i < free_wqebbs; i++) {
		skb = rx_alloc_skb(rxq, &dma_addr);
		if (!skb) {
			netdev_err(rxq->netdev, "Failed to alloc Rx skb\n");
			goto skb_out;
		}

		hinic_set_sge(&sge, dma_addr, skb->len);

		rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
					  &prod_idx);
		if (!rq_wqe) {
			rx_free_skb(rxq, skb, dma_addr);
			goto skb_out;
		}

		hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);

		hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
	}

skb_out:
	if (i) {
		wmb();  /* write all the WQEs before updating the producer index */

		hinic_rq_update(rxq->rq, prod_idx);
	}

	return i;
}

/**
 * free_all_rx_skbs - free all skbs in rx queue
 * @rxq: rx queue
 **/
static void free_all_rx_skbs(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;
	struct hinic_hw_wqe *hw_wqe;
	struct hinic_sge sge;
	u16 ci;

	while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
		if (IS_ERR(hw_wqe))
			break;

		hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);

		hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);

		rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
	}
}

/**
 * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
 * @rxq: rx queue
 * @head_skb: the first skb in the list
 * @left_pkt_len: remaining size of the pkt excluding the head skb
 * @ci: consumer index
 *
 * Return number of wqes used for the remainder of the pkt
 **/
static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
			     unsigned int left_pkt_len, u16 ci)
{
	struct sk_buff *skb, *curr_skb = head_skb;
	struct hinic_rq_wqe *rq_wqe;
	unsigned int curr_len;
	struct hinic_sge sge;
	int num_wqes = 0;

	while (left_pkt_len > 0) {
		rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
						&skb, &ci);

		num_wqes++;

		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		prefetch(skb->data);

		curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
			    left_pkt_len;

		left_pkt_len -= curr_len;

		__skb_put(skb, curr_len);

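		/* Chain the extra buffer into the head skb: the first one
		 * starts the frag_list, later ones are linked via
		 * skb->next, and the head's length accounting is updated.
		 */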
		if (curr_skb == head_skb)
			skb_shinfo(head_skb)->frag_list = skb;
		else
			curr_skb->next = skb;

		head_skb->len += skb->len;
		head_skb->data_len += skb->len;
		head_skb->truesize += skb->truesize;

		curr_skb = skb;
	}

	return num_wqes;
}

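/**
 * hinic_copy_lp_data - copy a received loopback-test packet
 * @nic_dev: nic device running the loopback self-test
 * @skb: received skb to copy into the test buffer
 *
 * Copies the linear data and all page fragments of @skb into the
 * preallocated lb_test_rx_buf at the slot selected by lb_test_rx_idx,
 * for later verification by the self-test.
 **/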
static void hinic_copy_lp_data(struct hinic_dev *nic_dev,
			       struct sk_buff *skb)
{
	struct net_device *netdev = nic_dev->netdev;
	u8 *lb_buf = nic_dev->lb_test_rx_buf;
	int lb_len = nic_dev->lb_pkt_len;
	int pkt_offset, frag_len, i;
	void *frag_data = NULL;

	if (nic_dev->lb_test_rx_idx == LP_PKT_CNT) {
		nic_dev->lb_test_rx_idx = 0;
		netif_warn(nic_dev, drv, netdev, "Loopback test warning, received too many test packets\n");
	}

	if (skb->len != nic_dev->lb_pkt_len) {
		netif_warn(nic_dev, drv, netdev, "Wrong packet length\n");
		nic_dev->lb_test_rx_idx++;
		return;
	}

	pkt_offset = nic_dev->lb_test_rx_idx * lb_len;
	frag_len = (int)skb_headlen(skb);
	memcpy(lb_buf + pkt_offset, skb->data, frag_len);
	pkt_offset += frag_len;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		frag_data = skb_frag_address(&skb_shinfo(skb)->frags[i]);
		frag_len = (int)skb_frag_size(&skb_shinfo(skb)->frags[i]);
		memcpy((lb_buf + pkt_offset), frag_data, frag_len);
		pkt_offset += frag_len;
	}
	nic_dev->lb_test_rx_idx++;
}

/**
 * rxq_recv - Rx handler
 * @rxq: rx queue
 * @budget: maximum pkts to process
 *
 * Return number of pkts received
 **/
static int rxq_recv(struct hinic_rxq *rxq, int budget)
{
	struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
	struct net_device *netdev = rxq->netdev;
	u64 pkt_len = 0, rx_bytes = 0;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_rq_wqe *rq_wqe;
	struct hinic_dev *nic_dev;
	unsigned int free_wqebbs;
	struct hinic_rq_cqe *cqe;
	int num_wqes, pkts = 0;
	struct hinic_sge sge;
	unsigned int status;
	struct sk_buff *skb;
	u32 offload_type;
	u16 ci, num_lro;
	u16 num_wqe = 0;
	u32 vlan_len;
	u16 vid;

	nic_dev = netdev_priv(netdev);

	while (pkts < budget) {
		num_wqes = 0;

		rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
					   &ci);
		if (!rq_wqe)
			break;

		/* make sure we read rx_done before packet length */
		dma_rmb();

		cqe = rq->cqe[ci];
		status = be32_to_cpu(cqe->status);
		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		rx_csum(rxq, status, skb);

		prefetch(skb->data);

		pkt_len = sge.len;

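		/* Packets larger than one rx buffer span multiple WQEs:
		 * gather the remaining buffers into the head skb via
		 * rx_recv_jumbo_pkt(), then release every consumed WQE
		 * in one hinic_rq_put_wqe() call.
		 */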
		if (pkt_len <= HINIC_RX_BUF_SZ) {
			__skb_put(skb, pkt_len);
		} else {
			__skb_put(skb, HINIC_RX_BUF_SZ);
			num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
						     HINIC_RX_BUF_SZ, ci);
		}

		hinic_rq_put_wqe(rq, ci,
				 (num_wqes + 1) * HINIC_RQ_WQE_SIZE);

		offload_type = be32_to_cpu(cqe->offload_type);
		vlan_len = be32_to_cpu(cqe->len);
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    HINIC_GET_RX_VLAN_OFFLOAD_EN(offload_type)) {
			vid = HINIC_GET_RX_VLAN_TAG(vlan_len);
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid);
		}

		if (unlikely(nic_dev->flags & HINIC_LP_TEST))
			hinic_copy_lp_data(nic_dev, skb);

		skb_record_rx_queue(skb, qp->q_id);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		napi_gro_receive(&rxq->napi, skb);

		pkts++;
		rx_bytes += pkt_len;

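		/* An LRO aggregate of num_lro frames is delivered as one
		 * skb: credit rx_bytes with the headers the merge removed
		 * and count the RQ buffers the aggregate consumed, so the
		 * loop can bail out early and replenish the queue.
		 */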
		num_lro = HINIC_GET_RX_NUM_LRO(status);
		if (num_lro) {
			rx_bytes += ((num_lro - 1) *
				     LRO_PKT_HDR_LEN(cqe));

			num_wqe +=
			(u16)(pkt_len >> rxq->rx_buff_shift) +
			((pkt_len & (rxq->buf_len - 1)) ? 1 : 0);
		}

		cqe->status = 0;

		if (num_wqe >= LRO_REPLENISH_THLD)
			break;
	}

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
	if (free_wqebbs > HINIC_RX_BUFFER_WRITE)
		rx_alloc_pkts(rxq);

	u64_stats_update_begin(&rxq->rxq_stats.syncp);
	rxq->rxq_stats.pkts += pkts;
	rxq->rxq_stats.bytes += rx_bytes;
	u64_stats_update_end(&rxq->rxq_stats.syncp);

	return pkts;
}

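/**
 * rx_poll - NAPI poll handler for the rx queue
 * @napi: napi context embedded in the rx queue
 * @budget: maximum number of packets to process
 *
 * Return number of packets received; when the budget is not exhausted,
 * completes NAPI and (on the PF) re-enables the queue's MSI-X vector.
 **/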
static int rx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq *rq = rxq->rq;
	int pkts;

	pkts = rxq_recv(rxq, budget);
	if (pkts >= budget)
		return budget;

	napi_complete(napi);

	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   rq->msix_entry,
					   HINIC_MSIX_ENABLE);

	return pkts;
}

static void rx_add_napi(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);

	netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
	napi_enable(&rxq->napi);
}

static void rx_del_napi(struct hinic_rxq *rxq)
{
	napi_disable(&rxq->napi);
	netif_napi_del(&rxq->napi);
}

static irqreturn_t rx_irq(int irq, void *data)
{
	struct hinic_rxq *rxq = (struct hinic_rxq *)data;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_dev *nic_dev;

	/* Disable the interrupt until napi is complete */
	nic_dev = netdev_priv(rxq->netdev);
	if (!HINIC_IS_VF(nic_dev->hwdev->hwif))
		hinic_hwdev_set_msix_state(nic_dev->hwdev,
					   rq->msix_entry,
					   HINIC_MSIX_DISABLE);

	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);

	napi_schedule(&rxq->napi);
	return IRQ_HANDLED;
}

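/**
 * rx_request_irq - configure coalescing and request the rx queue irq
 * @rxq: rx queue
 *
 * Registers the NAPI context, programs the MSI-X entry with the queue's
 * interrupt coalescing settings, requests the irq and sets its cpu
 * affinity hint.
 *
 * Return 0 - Success, negative - Failure
 **/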
static int rx_request_irq(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_msix_config interrupt_info = {0};
	struct hinic_intr_coal_info *intr_coal = NULL;
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_qp *qp;
	int err;

	qp = container_of(rq, struct hinic_qp, rq);

	rx_add_napi(rxq);

	hinic_hwdev_msix_set(hwdev, rq->msix_entry,
			     RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
			     RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
			     RX_IRQ_NO_RESEND_TIMER);

	intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id];
	interrupt_info.msix_index = rq->msix_entry;
	interrupt_info.coalesce_timer_cnt = intr_coal->coalesce_timer_cfg;
	interrupt_info.pending_cnt = intr_coal->pending_limt;
	interrupt_info.resend_timer_cnt = intr_coal->resend_timer_cfg;

	err = hinic_set_interrupt_cfg(hwdev, &interrupt_info);
	if (err) {
		netif_err(nic_dev, drv, rxq->netdev,
			  "Failed to set RX interrupt coalescing attribute\n");
		goto err_req_irq;
	}

	err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
	if (err)
		goto err_req_irq;

	cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
	err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
	if (err)
		goto err_irq_affinity;

	return 0;

err_irq_affinity:
	free_irq(rq->irq, rxq);
err_req_irq:
	rx_del_napi(rxq);
	return err;
}

static void rx_free_irq(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;

	irq_set_affinity_hint(rq->irq, NULL);
	free_irq(rq->irq, rxq);
	rx_del_napi(rxq);
}

/**
 * hinic_init_rxq - Initialize the Rx Queue
 * @rxq: Logical Rx Queue
 * @rq: Hardware Rx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
	int err, pkts;

	rxq->netdev = netdev;
	rxq->rq = rq;
	rxq->buf_len = HINIC_RX_BUF_SZ;
	rxq->rx_buff_shift = ilog2(HINIC_RX_BUF_SZ);

	rxq_stats_init(rxq);

	rxq->irq_name = devm_kasprintf(&netdev->dev, GFP_KERNEL,
				       "%s_rxq%d", netdev->name, qp->q_id);
	if (!rxq->irq_name)
		return -ENOMEM;

	pkts = rx_alloc_pkts(rxq);
	if (!pkts) {
		err = -ENOMEM;
		goto err_rx_pkts;
	}

	err = rx_request_irq(rxq);
	if (err) {
		netdev_err(netdev, "Failed to request Rx irq\n");
		goto err_req_rx_irq;
	}

	return 0;

err_req_rx_irq:
err_rx_pkts:
	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
	return err;
}

/**
 * hinic_clean_rxq - Clean the Rx Queue
 * @rxq: Logical Rx Queue
 **/
void hinic_clean_rxq(struct hinic_rxq *rxq)
{
	struct net_device *netdev = rxq->netdev;

	rx_free_irq(rxq);

	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
}
634