xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/mellanox/mlx4/en_rx.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>
#include <linux/irq.h>

#include <net/ip.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ip6_checksum.h>
#endif

#include "mlx4_en.h"

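/* Allocate one full page for an RX fragment slot and DMA-map it for the
 * device.  On success the page, its bus address and the configured
 * rx_headroom are recorded in @frag; on failure the page is freed and
 * -ENOMEM is returned so the caller can leave the slot empty.
 */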
static int mlx4_alloc_page(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_alloc *frag,
			   gfp_t gfp)
{
	struct page *page;
	dma_addr_t dma;

	page = alloc_page(gfp);
	if (unlikely(!page))
		return -ENOMEM;
	dma = dma_map_page(priv->ddev, page, 0, PAGE_SIZE, priv->dma_dir);
	if (unlikely(dma_mapping_error(priv->ddev, dma))) {
		__free_page(page);
		return -ENOMEM;
	}
	frag->page = page;
	frag->dma = dma;
	frag->page_offset = priv->rx_headroom;
	return 0;
}

static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv,
			       struct mlx4_en_rx_ring *ring,
			       struct mlx4_en_rx_desc *rx_desc,
			       struct mlx4_en_rx_alloc *frags,
			       gfp_t gfp)
{
	int i;

	for (i = 0; i < priv->num_frags; i++, frags++) {
		if (!frags->page) {
			if (mlx4_alloc_page(priv, frags, gfp))
				return -ENOMEM;
			ring->rx_alloc_pages++;
		}
		rx_desc->data[i].addr = cpu_to_be64(frags->dma +
						    frags->page_offset);
	}
	return 0;
}

static void mlx4_en_free_frag(const struct mlx4_en_priv *priv,
			      struct mlx4_en_rx_alloc *frag)
{
	if (frag->page) {
		dma_unmap_page(priv->ddev, frag->dma,
			       PAGE_SIZE, priv->dma_dir);
		__free_page(frag->page);
	}
	/* We need to clear all fields, otherwise a change of priv->log_rx_info
	 * could lead to seeing garbage in frag->page later.
	 */
	memset(frag, 0, sizeof(*frag));
}

static void mlx4_en_init_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring, int index)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf + ring->stride * index;
	int possible_frags;
	int i;

	/* Set size and memtype fields */
	for (i = 0; i < priv->num_frags; i++) {
		rx_desc->data[i].byte_count =
			cpu_to_be32(priv->frag_info[i].frag_size);
		rx_desc->data[i].lkey = cpu_to_be32(priv->mdev->mr.key);
	}

	/* If the number of used fragments does not fill up the ring stride,
	 * remaining (unused) fragments must be padded with null address/size
	 * and a special memory key */
	possible_frags = (ring->stride - sizeof(struct mlx4_en_rx_desc)) / DS_SIZE;
	for (i = priv->num_frags; i < possible_frags; i++) {
		rx_desc->data[i].byte_count = 0;
		rx_desc->data[i].lkey = cpu_to_be32(MLX4_EN_MEMTYPE_PAD);
		rx_desc->data[i].addr = 0;
	}
}

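/* Post (or re-post) the RX descriptor at @index.  When the ring's page cache
 * holds recycled XDP pages, the descriptor is refilled from the cache without
 * touching the page allocator; otherwise each fragment is (re)allocated
 * through mlx4_en_alloc_frags().
 */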
static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
				   struct mlx4_en_rx_ring *ring, int index,
				   gfp_t gfp)
{
	struct mlx4_en_rx_desc *rx_desc = ring->buf +
		(index << ring->log_stride);
	struct mlx4_en_rx_alloc *frags = ring->rx_info +
					(index << priv->log_rx_info);
	if (likely(ring->page_cache.index > 0)) {
		/* XDP uses a single page per frame */
		if (!frags->page) {
			ring->page_cache.index--;
			frags->page = ring->page_cache.buf[ring->page_cache.index].page;
			frags->dma  = ring->page_cache.buf[ring->page_cache.index].dma;
		}
		frags->page_offset = XDP_PACKET_HEADROOM;
		rx_desc->data[0].addr = cpu_to_be64(frags->dma +
						    XDP_PACKET_HEADROOM);
		return 0;
	}

	return mlx4_en_alloc_frags(priv, ring, rx_desc, frags, gfp);
}

static bool mlx4_en_is_ring_empty(const struct mlx4_en_rx_ring *ring)
{
	return ring->prod == ring->cons;
}

static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
	*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
}

/* slow path */
static void mlx4_en_free_rx_desc(const struct mlx4_en_priv *priv,
				 struct mlx4_en_rx_ring *ring,
				 int index)
{
	struct mlx4_en_rx_alloc *frags;
	int nr;

	frags = ring->rx_info + (index << priv->log_rx_info);
	for (nr = 0; nr < priv->num_frags; nr++) {
		en_dbg(DRV, priv, "Freeing fragment:%d\n", nr);
		mlx4_en_free_frag(priv, frags + nr);
	}
}

/* Function not in fast-path */
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int ring_ind;
	int buf_ind;
	int new_size;

	for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) {
		for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
			ring = priv->rx_ring[ring_ind];

			if (mlx4_en_prepare_rx_desc(priv, ring,
						    ring->actual_size,
						    GFP_KERNEL)) {
				if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
					en_err(priv, "Failed to allocate enough rx buffers\n");
					return -ENOMEM;
				} else {
					new_size = rounddown_pow_of_two(ring->actual_size);
					en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
						ring->actual_size, new_size);
					goto reduce_rings;
				}
			}
			ring->actual_size++;
			ring->prod++;
		}
	}
	return 0;

reduce_rings:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];
		while (ring->actual_size > new_size) {
			ring->actual_size--;
			ring->prod--;
			mlx4_en_free_rx_desc(priv, ring, ring->actual_size);
		}
	}

	return 0;
}

static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int index;

	en_dbg(DRV, priv, "Freeing Rx buf - cons:%d prod:%d\n",
	       ring->cons, ring->prod);

	/* Unmap and free Rx buffers */
	for (index = 0; index < ring->size; index++) {
		en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
		mlx4_en_free_rx_desc(priv, ring, index);
	}
	ring->cons = 0;
	ring->prod = 0;
}

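/* Choose the default number of RX rings for each Ethernet port: bounded by
 * the EQs available on that port and the number of online CPUs, then rounded
 * down to a power of two (MIN_RX_RINGS when the low-memory profile is in
 * effect).
 */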
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
{
	int i;
	int num_of_eqs;
	int num_rx_rings;
	struct mlx4_dev *dev = mdev->dev;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
		num_of_eqs = max_t(int, MIN_RX_RINGS,
				   min_t(int,
					 mlx4_get_eqs_per_port(mdev->dev, i),
					 DEF_RX_RINGS));

		num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
			min_t(int, num_of_eqs, num_online_cpus());
		mdev->profile.prof[i].rx_ring_num =
			rounddown_pow_of_two(num_rx_rings);
	}
}

int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
			   struct mlx4_en_rx_ring **pring,
			   u32 size, u16 stride, int node, int queue_index)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring;
	int err = -ENOMEM;
	int tmp;

	ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node);
	if (!ring) {
		en_err(priv, "Failed to allocate RX ring structure\n");
		return -ENOMEM;
	}

	ring->prod = 0;
	ring->cons = 0;
	ring->size = size;
	ring->size_mask = size - 1;
	ring->stride = stride;
	ring->log_stride = ffs(ring->stride) - 1;
	ring->buf_size = ring->size * ring->stride + TXBB_SIZE;

	if (xdp_rxq_info_reg(&ring->xdp_rxq, priv->dev, queue_index) < 0)
		goto err_ring;

	tmp = size * roundup_pow_of_two(MLX4_EN_MAX_RX_FRAGS *
					sizeof(struct mlx4_en_rx_alloc));
	ring->rx_info = kvzalloc_node(tmp, GFP_KERNEL, node);
	if (!ring->rx_info) {
		err = -ENOMEM;
		goto err_xdp_info;
	}

	en_dbg(DRV, priv, "Allocated rx_info ring at addr:%p size:%d\n",
		 ring->rx_info, tmp);

	/* Allocate HW buffers on provided NUMA node */
	set_dev_node(&mdev->dev->persist->pdev->dev, node);
	err = mlx4_alloc_hwq_res(mdev->dev, &ring->wqres, ring->buf_size);
	set_dev_node(&mdev->dev->persist->pdev->dev, mdev->dev->numa_node);
	if (err)
		goto err_info;

	ring->buf = ring->wqres.buf.direct.buf;

	ring->hwtstamp_rx_filter = priv->hwtstamp_config.rx_filter;

	*pring = ring;
	return 0;

err_info:
	kvfree(ring->rx_info);
	ring->rx_info = NULL;
err_xdp_info:
	xdp_rxq_info_unreg(&ring->xdp_rxq);
err_ring:
	kfree(ring);
	*pring = NULL;

	return err;
}

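/* Bring every RX ring to an operational state: recompute the stride from the
 * current fragment layout, stamp and skip the leading TXBB when the stride is
 * small enough to share the buffer with it, initialize all descriptors, and
 * fill the rings with buffers (possibly shrinking them on partial allocation
 * failure) before publishing the producer index to hardware.
 */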
int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
{
	struct mlx4_en_rx_ring *ring;
	int i;
	int ring_ind;
	int err;
	int stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					DS_SIZE * priv->num_frags);

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->prod = 0;
		ring->cons = 0;
		ring->actual_size = 0;
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
		if (ring->stride <= TXBB_SIZE) {
			/* Stamp first unused send wqe */
			__be32 *ptr = (__be32 *)ring->buf;
			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
			*ptr = stamp;
			/* Move pointer to start of rx section */
			ring->buf += TXBB_SIZE;
		}

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;

		memset(ring->buf, 0, ring->buf_size);
		mlx4_en_update_rx_prod_db(ring);

		/* Initialize all descriptors */
		for (i = 0; i < ring->size; i++)
			mlx4_en_init_rx_desc(priv, ring, i);
	}
	err = mlx4_en_fill_rx_buffers(priv);
	if (err)
		goto err_buffers;

	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) {
		ring = priv->rx_ring[ring_ind];

		ring->size_mask = ring->actual_size - 1;
		mlx4_en_update_rx_prod_db(ring);
	}

	return 0;

err_buffers:
	for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++)
		mlx4_en_free_rx_buf(priv, priv->rx_ring[ring_ind]);

	ring_ind = priv->rx_ring_num - 1;
	while (ring_ind >= 0) {
		if (priv->rx_ring[ring_ind]->stride <= TXBB_SIZE)
			priv->rx_ring[ring_ind]->buf -= TXBB_SIZE;
		ring_ind--;
	}
	return err;
}

/* We recover from out of memory by scheduling our napi poll
 * function (mlx4_en_process_cq), which tries to allocate
 * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
 */
void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
{
	int ring;

	if (!priv->port_up)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		if (mlx4_en_is_ring_empty(priv->rx_ring[ring])) {
			local_bh_disable();
			napi_reschedule(&priv->rx_cq[ring]->napi);
			local_bh_enable();
		}
	}
}

/* When the rx ring is running in page-per-packet mode, a released frame can go
 * directly into a small cache, to avoid unmapping or touching the page
 * allocator. In bpf prog performance scenarios, buffers are either forwarded
 * or dropped, never converted to skbs, so every page can come directly from
 * this cache when it is sized to be a multiple of the napi budget.
 */
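/* A minimal usage sketch (hypothetical caller, not defined in this file):
 * the XDP TX completion path is expected to offer transmitted page-per-packet
 * frames back with something like
 *
 *	if (!mlx4_en_rx_recycle(rx_ring, &frame)) {
 *		dma_unmap_page(priv->ddev, frame.dma, PAGE_SIZE, priv->dma_dir);
 *		__free_page(frame.page);
 *	}
 *
 * since a 'false' return means the cache is full and the page stays owned by
 * the caller.
 */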
bool mlx4_en_rx_recycle(struct mlx4_en_rx_ring *ring,
			struct mlx4_en_rx_alloc *frame)
{
	struct mlx4_en_page_cache *cache = &ring->page_cache;

	if (cache->index >= MLX4_EN_CACHE_SIZE)
		return false;

	cache->buf[cache->index].page = frame->page;
	cache->buf[cache->index].dma = frame->dma;
	cache->index++;
	return true;
}

void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring **pring,
			     u32 size, u16 stride)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_rx_ring *ring = *pring;
	struct bpf_prog *old_prog;

	old_prog = rcu_dereference_protected(
					ring->xdp_prog,
					lockdep_is_held(&mdev->state_lock));
	if (old_prog)
		bpf_prog_put(old_prog);
	xdp_rxq_info_unreg(&ring->xdp_rxq);
	mlx4_free_hwq_res(mdev->dev, &ring->wqres, size * stride + TXBB_SIZE);
	kvfree(ring->rx_info);
	ring->rx_info = NULL;
	kfree(ring);
	*pring = NULL;
}

void mlx4_en_deactivate_rx_ring(struct mlx4_en_priv *priv,
				struct mlx4_en_rx_ring *ring)
{
	int i;

	for (i = 0; i < ring->page_cache.index; i++) {
		dma_unmap_page(priv->ddev, ring->page_cache.buf[i].dma,
			       PAGE_SIZE, priv->dma_dir);
		put_page(ring->page_cache.buf[i].page);
	}
	ring->page_cache.index = 0;
	mlx4_en_free_rx_buf(priv, ring);
	if (ring->stride <= TXBB_SIZE)
		ring->buf -= TXBB_SIZE;
}


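/* Copy the used fragments of a completed descriptor into @skb's frag list
 * and decide, per fragment, whether the underlying page can be kept for the
 * next packet (offset flip or bump) or must be unmapped and handed over to
 * the stack.  Returns the number of frags attached, or 0 if a page was
 * missing.
 */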
static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
				    struct mlx4_en_rx_alloc *frags,
				    struct sk_buff *skb,
				    int length)
{
	const struct mlx4_en_frag_info *frag_info = priv->frag_info;
	unsigned int truesize = 0;
	bool release = true;
	int nr, frag_size;
	struct page *page;
	dma_addr_t dma;

	/* Collect used fragments while replacing them in the HW descriptors */
	for (nr = 0;; frags++) {
		frag_size = min_t(int, length, frag_info->frag_size);

		page = frags->page;
		if (unlikely(!page))
			goto fail;

		dma = frags->dma;
		dma_sync_single_range_for_cpu(priv->ddev, dma, frags->page_offset,
					      frag_size, priv->dma_dir);

		__skb_fill_page_desc(skb, nr, page, frags->page_offset,
				     frag_size);

		truesize += frag_info->frag_stride;
		if (frag_info->frag_stride == PAGE_SIZE / 2) {
			frags->page_offset ^= PAGE_SIZE / 2;
			release = page_count(page) != 1 ||
				  page_is_pfmemalloc(page) ||
				  page_to_nid(page) != numa_mem_id();
		} else if (!priv->rx_headroom) {
			/* rx_headroom for non XDP setup is always 0.
			 * When XDP is set, the above condition will
			 * guarantee page is always released.
			 */
			u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);

			frags->page_offset += sz_align;
			release = frags->page_offset + frag_info->frag_size > PAGE_SIZE;
		}
		if (release) {
			dma_unmap_page(priv->ddev, dma, PAGE_SIZE, priv->dma_dir);
			frags->page = NULL;
		} else {
			page_ref_inc(page);
		}

		nr++;
		length -= frag_size;
		if (!length)
			break;
		frag_info++;
	}
	skb->truesize += truesize;
	return nr;

fail:
	while (nr > 0) {
		nr--;
		__skb_frag_unref(skb_shinfo(skb)->frags + nr);
	}
	return 0;
}

static void validate_loopback(struct mlx4_en_priv *priv, void *va)
{
	const unsigned char *data = va + ETH_HLEN;
	int i;

	for (i = 0; i < MLX4_LOOPBACK_TEST_PAYLOAD; i++) {
		if (data[i] != (unsigned char)i)
			return;
	}
	/* Loopback found */
	priv->loopback_ok = 1;
}

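/* Re-post RX descriptors consumed since the last refill.  Work is batched:
 * nothing is done until at least 8 descriptors are missing, and the producer
 * doorbell is updated once after the batch.  Allocation uses
 * GFP_ATOMIC | __GFP_MEMALLOC because this runs from napi context and may be
 * required to make progress under memory pressure.
 */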
static void mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
				      struct mlx4_en_rx_ring *ring)
{
	u32 missing = ring->actual_size - (ring->prod - ring->cons);

	/* Try to batch allocations, but not too much. */
	if (missing < 8)
		return;
	do {
		if (mlx4_en_prepare_rx_desc(priv, ring,
					    ring->prod & ring->size_mask,
					    GFP_ATOMIC | __GFP_MEMALLOC))
			break;
		ring->prod++;
	} while (likely(--missing));

	mlx4_en_update_rx_prod_db(ring);
}

/* When hardware doesn't strip the vlan, we need to calculate the checksum
 * over it and add it to the hardware's checksum calculation
 */
static inline __wsum get_fixed_vlan_csum(__wsum hw_checksum,
					 struct vlan_hdr *vlanh)
{
	return csum_add(hw_checksum, *(__wsum *)vlanh);
}

/* Although the stack expects checksum which doesn't include the pseudo
 * header, the HW adds it. To address that, we are subtracting the pseudo
 * header checksum from the checksum value provided by the HW.
 */
static int get_fixed_ipv4_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct iphdr *iph)
{
	__u16 length_for_csum = 0;
	__wsum csum_pseudo_header = 0;
	__u8 ipproto = iph->protocol;

	if (unlikely(ipproto == IPPROTO_SCTP))
		return -1;

	length_for_csum = (be16_to_cpu(iph->tot_len) - (iph->ihl << 2));
	csum_pseudo_header = csum_tcpudp_nofold(iph->saddr, iph->daddr,
						length_for_csum, ipproto, 0);
	skb->csum = csum_sub(hw_checksum, csum_pseudo_header);
	return 0;
}

#if IS_ENABLED(CONFIG_IPV6)
/* In IPv6 packets, hw_checksum lacks 6 bytes from IPv6 header:
 * 4 first bytes : priority, version, flow_lbl
 * and 2 additional bytes : nexthdr, hop_limit.
 */
static int get_fixed_ipv6_csum(__wsum hw_checksum, struct sk_buff *skb,
			       struct ipv6hdr *ipv6h)
{
	__u8 nexthdr = ipv6h->nexthdr;
	__wsum temp;

	if (unlikely(nexthdr == IPPROTO_FRAGMENT ||
		     nexthdr == IPPROTO_HOPOPTS ||
		     nexthdr == IPPROTO_SCTP))
		return -1;

	/* priority, version, flow_lbl */
	temp = csum_add(hw_checksum, *(__wsum *)ipv6h);
	/* nexthdr and hop_limit */
	skb->csum = csum_add(temp, (__force __wsum)*(__be16 *)&ipv6h->nexthdr);
	return 0;
}
#endif

#define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)

/* We reach this function only after checking that any of
 * the (IPv4 | IPv6) bits are set in cqe->status.
 */
static int check_csum(struct mlx4_cqe *cqe, struct sk_buff *skb, void *va,
		      netdev_features_t dev_features)
{
	__wsum hw_checksum = 0;
	void *hdr;

	/* CQE csum doesn't cover padding octets in short ethernet
	 * frames. And the pad field is appended prior to calculating
	 * and appending the FCS field.
	 *
	 * Detecting these padded frames requires to verify and parse
	 * IP headers, so we simply force all those small frames to skip
	 * checksum complete.
	 */
	if (short_frame(skb->len))
		return -EINVAL;

	hdr = (u8 *)va + sizeof(struct ethhdr);
	hw_checksum = csum_unfold((__force __sum16)cqe->checksum);

	if (cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK) &&
	    !(dev_features & NETIF_F_HW_VLAN_CTAG_RX)) {
		hw_checksum = get_fixed_vlan_csum(hw_checksum, hdr);
		hdr += sizeof(struct vlan_hdr);
	}

#if IS_ENABLED(CONFIG_IPV6)
	if (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPV6))
		return get_fixed_ipv6_csum(hw_checksum, skb, hdr);
#endif
	return get_fixed_ipv4_csum(hw_checksum, skb, hdr);
}

#if IS_ENABLED(CONFIG_IPV6)
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4 | MLX4_CQE_STATUS_IPV6)
#else
#define MLX4_CQE_STATUS_IP_ANY (MLX4_CQE_STATUS_IPV4)
#endif

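/* NAPI RX completion handler.  Walks the CQ (at most @budget CQEs), runs the
 * attached XDP program first when one is loaded, then builds frags-mode GRO
 * skbs with checksum, RSS hash, VLAN and timestamp information before handing
 * them to napi_gro_frags().  Returns the number of CQEs processed; consumed
 * descriptors are refilled on the way out.
 */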
int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int factor = priv->cqe_factor;
	struct mlx4_en_rx_ring *ring;
	struct bpf_prog *xdp_prog;
	int cq_ring = cq->ring;
	bool doorbell_pending;
	struct mlx4_cqe *cqe;
	struct xdp_buff xdp;
	int polled = 0;
	int index;

	if (unlikely(!priv->port_up || budget <= 0))
		return 0;

	ring = priv->rx_ring[cq_ring];

	/* Protect accesses to: ring->xdp_prog, priv->mac_hash list */
	rcu_read_lock();
	xdp_prog = rcu_dereference(ring->xdp_prog);
	xdp.rxq = &ring->xdp_rxq;
	xdp.frame_sz = priv->frag_info[0].frag_stride;
	doorbell_pending = 0;

	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
	 * descriptor offset can be deduced from the CQE index instead of
	 * reading 'cqe->index' */
	index = cq->mcq.cons_index & ring->size_mask;
	cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;

	/* Process all completed CQEs */
	while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
		    cq->mcq.cons_index & cq->size)) {
		struct mlx4_en_rx_alloc *frags;
		enum pkt_hash_types hash_type;
		struct sk_buff *skb;
		unsigned int length;
		int ip_summed;
		void *va;
		int nr;

		frags = ring->rx_info + (index << priv->log_rx_info);
		va = page_address(frags[0].page) + frags[0].page_offset;
		net_prefetchw(va);
		/*
		 * make sure we read the CQE after we read the ownership bit
		 */
		dma_rmb();

		/* Drop packet on bad receive or bad checksum */
		if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
						MLX4_CQE_OPCODE_ERROR)) {
			en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
			       ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
			       ((struct mlx4_err_cqe *)cqe)->syndrome);
			goto next;
		}
		if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
			en_dbg(RX_ERR, priv, "Accepted frame with bad FCS\n");
			goto next;
		}

		/* Check if we need to drop the packet if SRIOV is not enabled
		 * and not performing the selftest or flb disabled
		 */
		if (priv->flags & MLX4_EN_FLAG_RX_FILTER_NEEDED) {
			const struct ethhdr *ethh = va;
			dma_addr_t dma;
			/* Get pointer to first fragment since we haven't
			 * skb yet and cast it to ethhdr struct
			 */
			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma, sizeof(*ethh),
						DMA_FROM_DEVICE);

			if (is_multicast_ether_addr(ethh->h_dest)) {
				struct mlx4_mac_entry *entry;
				struct hlist_head *bucket;
				unsigned int mac_hash;

				/* Drop the packet, since HW loopback-ed it */
				mac_hash = ethh->h_source[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_for_each_entry_rcu(entry, bucket, hlist) {
					if (ether_addr_equal_64bits(entry->mac,
								    ethh->h_source))
						goto next;
				}
			}
		}

		if (unlikely(priv->validate_loopback)) {
			validate_loopback(priv, va);
			goto next;
		}

		/*
		 * Packet is OK - process it.
		 */
		length = be32_to_cpu(cqe->byte_cnt);
		length -= ring->fcs_del;

		/* A bpf program gets first chance to drop the packet. It may
		 * read bytes but not past the end of the frag.
		 */
		if (xdp_prog) {
			dma_addr_t dma;
			void *orig_data;
			u32 act;

			dma = frags[0].dma + frags[0].page_offset;
			dma_sync_single_for_cpu(priv->ddev, dma,
						priv->frag_info[0].frag_size,
						DMA_FROM_DEVICE);

			xdp.data_hard_start = va - frags[0].page_offset;
			xdp.data = va;
			xdp_set_data_meta_invalid(&xdp);
			xdp.data_end = xdp.data + length;
			orig_data = xdp.data;

			act = bpf_prog_run_xdp(xdp_prog, &xdp);

			length = xdp.data_end - xdp.data;
			if (xdp.data != orig_data) {
				frags[0].page_offset = xdp.data -
					xdp.data_hard_start;
				va = xdp.data;
			}

			switch (act) {
			case XDP_PASS:
				break;
			case XDP_TX:
				if (likely(!mlx4_en_xmit_frame(ring, frags, priv,
							length, cq_ring,
							&doorbell_pending))) {
					frags[0].page = NULL;
					goto next;
				}
				trace_xdp_exception(dev, xdp_prog, act);
				goto xdp_drop_no_cnt; /* Drop on xmit failure */
			default:
				bpf_warn_invalid_xdp_action(act);
				fallthrough;
			case XDP_ABORTED:
				trace_xdp_exception(dev, xdp_prog, act);
				fallthrough;
			case XDP_DROP:
				ring->xdp_drop++;
xdp_drop_no_cnt:
				goto next;
			}
		}

		ring->bytes += length;
		ring->packets++;

		skb = napi_get_frags(&cq->napi);
		if (unlikely(!skb))
			goto next;

		if (unlikely(ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL)) {
			u64 timestamp = mlx4_en_get_cqe_ts(cqe);

			mlx4_en_fill_hwtstamps(priv->mdev, skb_hwtstamps(skb),
					       timestamp);
		}
		skb_record_rx_queue(skb, cq_ring);

		if (likely(dev->features & NETIF_F_RXCSUM)) {
			/* TODO: For IP non TCP/UDP packets when csum complete is
			 * not an option (not supported or any other reason) we can
			 * actually check cqe IPOK status bit and report
			 * CHECKSUM_UNNECESSARY rather than CHECKSUM_NONE
			 */
			if ((cqe->status & cpu_to_be16(MLX4_CQE_STATUS_TCP |
						       MLX4_CQE_STATUS_UDP)) &&
			    (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			    cqe->checksum == cpu_to_be16(0xffff)) {
				bool l2_tunnel;

				l2_tunnel = (dev->hw_enc_features & NETIF_F_RXCSUM) &&
					(cqe->vlan_my_qpn & cpu_to_be32(MLX4_CQE_L2_TUNNEL));
				ip_summed = CHECKSUM_UNNECESSARY;
				hash_type = PKT_HASH_TYPE_L4;
				if (l2_tunnel)
					skb->csum_level = 1;
				ring->csum_ok++;
			} else {
				if (!(priv->flags & MLX4_EN_FLAG_RX_CSUM_NON_TCP_UDP &&
				      (cqe->status & cpu_to_be16(MLX4_CQE_STATUS_IP_ANY))))
					goto csum_none;
				if (check_csum(cqe, skb, va, dev->features))
					goto csum_none;
				ip_summed = CHECKSUM_COMPLETE;
				hash_type = PKT_HASH_TYPE_L3;
				ring->csum_complete++;
			}
		} else {
csum_none:
			ip_summed = CHECKSUM_NONE;
			hash_type = PKT_HASH_TYPE_L3;
			ring->csum_none++;
		}
		skb->ip_summed = ip_summed;
		if (dev->features & NETIF_F_RXHASH)
			skb_set_hash(skb,
				     be32_to_cpu(cqe->immed_rss_invalid),
				     hash_type);

		if ((cqe->vlan_my_qpn &
		     cpu_to_be32(MLX4_CQE_CVLAN_PRESENT_MASK)) &&
		    (dev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
					       be16_to_cpu(cqe->sl_vid));
		else if ((cqe->vlan_my_qpn &
			  cpu_to_be32(MLX4_CQE_SVLAN_PRESENT_MASK)) &&
			 (dev->features & NETIF_F_HW_VLAN_STAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD),
					       be16_to_cpu(cqe->sl_vid));

		nr = mlx4_en_complete_rx_desc(priv, frags, skb, length);
		if (likely(nr)) {
			skb_shinfo(skb)->nr_frags = nr;
			skb->len = length;
			skb->data_len = length;
			napi_gro_frags(&cq->napi);
		} else {
			__vlan_hwaccel_clear_tag(skb);
			skb_clear_hash(skb);
		}
next:
		++cq->mcq.cons_index;
		index = (cq->mcq.cons_index) & ring->size_mask;
		cqe = mlx4_en_get_cqe(cq->buf, index, priv->cqe_size) + factor;
		if (unlikely(++polled == budget))
			break;
	}

	rcu_read_unlock();

	if (likely(polled)) {
		if (doorbell_pending) {
			priv->tx_cq[TX_XDP][cq_ring]->xdp_busy = true;
			mlx4_en_xmit_doorbell(priv->tx_ring[TX_XDP][cq_ring]);
		}

		mlx4_cq_set_ci(&cq->mcq);
		wmb(); /* ensure HW sees CQ consumer before we post new buffers */
		ring->cons = cq->mcq.cons_index;
	}
	AVG_PERF_COUNTER(priv->pstats.rx_coal_avg, polled);

	mlx4_en_refill_rx_buffers(priv, ring);

	return polled;
}


void mlx4_en_rx_irq(struct mlx4_cq *mcq)
{
	struct mlx4_en_cq *cq = container_of(mcq, struct mlx4_en_cq, mcq);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);

	if (likely(priv->port_up))
		napi_schedule_irqoff(&cq->napi);
	else
		mlx4_en_arm_cq(priv, cq);
}

/* Rx CQ polling - called by NAPI */
int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *xdp_tx_cq = NULL;
	bool clean_complete = true;
	int done;

	if (!budget)
		return 0;

	if (priv->tx_ring_num[TX_XDP]) {
		xdp_tx_cq = priv->tx_cq[TX_XDP][cq->ring];
		if (xdp_tx_cq->xdp_busy) {
			clean_complete = mlx4_en_process_tx_cq(dev, xdp_tx_cq,
							       budget) < budget;
			xdp_tx_cq->xdp_busy = !clean_complete;
		}
	}

	done = mlx4_en_process_rx_cq(dev, cq, budget);

	/* If we used up all the quota - we're probably not done yet... */
	if (done == budget || !clean_complete) {
		const struct cpumask *aff;
		struct irq_data *idata;
		int cpu_curr;

		/* in case we got here because of !clean_complete */
		done = budget;

		INC_PERF_COUNTER(priv->pstats.napi_quota);

		cpu_curr = smp_processor_id();
		idata = irq_desc_get_irq_data(cq->irq_desc);
		aff = irq_data_get_affinity_mask(idata);

		if (likely(cpumask_test_cpu(cpu_curr, aff)))
			return budget;

		/* Current cpu is not according to smp_irq_affinity -
		 * probably affinity changed. Need to stop this NAPI
		 * poll, and restart it on the right CPU.
		 * Try to avoid returning a too small value (like 0),
		 * to not fool net_rx_action() and its netdev_budget
		 */
		if (done)
			done--;
	}
	/* Done for now */
	if (likely(napi_complete_done(napi, done)))
		mlx4_en_arm_cq(priv, cq);
	return done;
}

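/* Compute the RX fragment layout for the current MTU.  With an XDP program
 * loaded the whole frame must fit in a single page-per-packet fragment with
 * XDP_PACKET_HEADROOM and a bidirectional DMA mapping; otherwise the
 * effective MTU is split across up to MLX4_EN_MAX_RX_FRAGS fragments of at
 * most 2K each, with strides padded so the fragments sharing a page split it
 * evenly.
 */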
void mlx4_en_calc_rx_buf(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int eff_mtu = MLX4_EN_EFF_MTU(dev->mtu);
	int i = 0;

	/* bpf requires buffers to be set up as 1 packet per page.
	 * This only works when num_frags == 1.
	 */
	if (priv->tx_ring_num[TX_XDP]) {
		priv->frag_info[0].frag_size = eff_mtu;
		/* This will gain efficient xdp frame recycling at the
		 * expense of more costly truesize accounting
		 */
		priv->frag_info[0].frag_stride = PAGE_SIZE;
		priv->dma_dir = PCI_DMA_BIDIRECTIONAL;
		priv->rx_headroom = XDP_PACKET_HEADROOM;
		i = 1;
	} else {
		int frag_size_max = 2048, buf_size = 0;

		/* should not happen, right ? */
		if (eff_mtu > PAGE_SIZE + (MLX4_EN_MAX_RX_FRAGS - 1) * 2048)
			frag_size_max = PAGE_SIZE;

		while (buf_size < eff_mtu) {
			int frag_stride, frag_size = eff_mtu - buf_size;
			int pad, nb;

			if (i < MLX4_EN_MAX_RX_FRAGS - 1)
				frag_size = min(frag_size, frag_size_max);

			priv->frag_info[i].frag_size = frag_size;
			frag_stride = ALIGN(frag_size, SMP_CACHE_BYTES);
			/* We can only pack two 1536-byte frames in one 4K page.
			 * Therefore, each frame would consume more bytes (truesize)
			 */
1030*4882a593Smuzhiyun 			nb = PAGE_SIZE / frag_stride;
1031*4882a593Smuzhiyun 			pad = (PAGE_SIZE - nb * frag_stride) / nb;
1032*4882a593Smuzhiyun 			pad &= ~(SMP_CACHE_BYTES - 1);
1033*4882a593Smuzhiyun 			priv->frag_info[i].frag_stride = frag_stride + pad;
1034*4882a593Smuzhiyun 
1035*4882a593Smuzhiyun 			buf_size += frag_size;
1036*4882a593Smuzhiyun 			i++;
1037*4882a593Smuzhiyun 		}
1038*4882a593Smuzhiyun 		priv->dma_dir = PCI_DMA_FROMDEVICE;
1039*4882a593Smuzhiyun 		priv->rx_headroom = 0;
1040*4882a593Smuzhiyun 	}
1041*4882a593Smuzhiyun 
1042*4882a593Smuzhiyun 	priv->num_frags = i;
1043*4882a593Smuzhiyun 	priv->rx_skb_size = eff_mtu;
1044*4882a593Smuzhiyun 	priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
1045*4882a593Smuzhiyun 
1046*4882a593Smuzhiyun 	en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
1047*4882a593Smuzhiyun 	       eff_mtu, priv->num_frags);
1048*4882a593Smuzhiyun 	for (i = 0; i < priv->num_frags; i++) {
1049*4882a593Smuzhiyun 		en_dbg(DRV,
1050*4882a593Smuzhiyun 		       priv,
1051*4882a593Smuzhiyun 		       "  frag:%d - size:%d stride:%d\n",
1052*4882a593Smuzhiyun 		       i,
1053*4882a593Smuzhiyun 		       priv->frag_info[i].frag_size,
1054*4882a593Smuzhiyun 		       priv->frag_info[i].frag_stride);
1055*4882a593Smuzhiyun 	}
1056*4882a593Smuzhiyun }
1057*4882a593Smuzhiyun 
1058*4882a593Smuzhiyun /* RSS related functions */
1059*4882a593Smuzhiyun 
mlx4_en_config_rss_qp(struct mlx4_en_priv * priv,int qpn,struct mlx4_en_rx_ring * ring,enum mlx4_qp_state * state,struct mlx4_qp * qp)1060*4882a593Smuzhiyun static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn,
1061*4882a593Smuzhiyun 				 struct mlx4_en_rx_ring *ring,
1062*4882a593Smuzhiyun 				 enum mlx4_qp_state *state,
1063*4882a593Smuzhiyun 				 struct mlx4_qp *qp)
1064*4882a593Smuzhiyun {
1065*4882a593Smuzhiyun 	struct mlx4_en_dev *mdev = priv->mdev;
1066*4882a593Smuzhiyun 	struct mlx4_qp_context *context;
1067*4882a593Smuzhiyun 	int err = 0;
1068*4882a593Smuzhiyun 
1069*4882a593Smuzhiyun 	context = kmalloc(sizeof(*context), GFP_KERNEL);
1070*4882a593Smuzhiyun 	if (!context)
1071*4882a593Smuzhiyun 		return -ENOMEM;
1072*4882a593Smuzhiyun 
1073*4882a593Smuzhiyun 	err = mlx4_qp_alloc(mdev->dev, qpn, qp);
1074*4882a593Smuzhiyun 	if (err) {
1075*4882a593Smuzhiyun 		en_err(priv, "Failed to allocate qp #%x\n", qpn);
1076*4882a593Smuzhiyun 		goto out;
1077*4882a593Smuzhiyun 	}
1078*4882a593Smuzhiyun 	qp->event = mlx4_en_sqp_event;
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	memset(context, 0, sizeof(*context));
1081*4882a593Smuzhiyun 	mlx4_en_fill_qp_context(priv, ring->actual_size, ring->stride, 0, 0,
1082*4882a593Smuzhiyun 				qpn, ring->cqn, -1, context);
1083*4882a593Smuzhiyun 	context->db_rec_addr = cpu_to_be64(ring->wqres.db.dma);
1084*4882a593Smuzhiyun 
1085*4882a593Smuzhiyun 	/* Cancel FCS removal if FW allows */
1086*4882a593Smuzhiyun 	if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP) {
1087*4882a593Smuzhiyun 		context->param3 |= cpu_to_be32(1 << 29);
1088*4882a593Smuzhiyun 		if (priv->dev->features & NETIF_F_RXFCS)
1089*4882a593Smuzhiyun 			ring->fcs_del = 0;
1090*4882a593Smuzhiyun 		else
1091*4882a593Smuzhiyun 			ring->fcs_del = ETH_FCS_LEN;
1092*4882a593Smuzhiyun 	} else
1093*4882a593Smuzhiyun 		ring->fcs_del = 0;
1094*4882a593Smuzhiyun 
1095*4882a593Smuzhiyun 	err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, context, qp, state);
1096*4882a593Smuzhiyun 	if (err) {
1097*4882a593Smuzhiyun 		mlx4_qp_remove(mdev->dev, qp);
1098*4882a593Smuzhiyun 		mlx4_qp_free(mdev->dev, qp);
1099*4882a593Smuzhiyun 	}
1100*4882a593Smuzhiyun 	mlx4_en_update_rx_prod_db(ring);
1101*4882a593Smuzhiyun out:
1102*4882a593Smuzhiyun 	kfree(context);
1103*4882a593Smuzhiyun 	return err;
1104*4882a593Smuzhiyun }
1105*4882a593Smuzhiyun 
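/* The drop QP is never brought to a ready state or given buffers; it only
 * provides a valid destination QPN for flow-steering rules whose action is
 * to discard traffic (e.g. ethtool N-tuple rules with a drop target).
 */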
1106*4882a593Smuzhiyun int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv)
1107*4882a593Smuzhiyun {
1108*4882a593Smuzhiyun 	int err;
1109*4882a593Smuzhiyun 	u32 qpn;
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 	err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn,
1112*4882a593Smuzhiyun 				    MLX4_RESERVE_A0_QP,
1113*4882a593Smuzhiyun 				    MLX4_RES_USAGE_DRIVER);
1114*4882a593Smuzhiyun 	if (err) {
1115*4882a593Smuzhiyun 		en_err(priv, "Failed reserving drop qpn\n");
1116*4882a593Smuzhiyun 		return err;
1117*4882a593Smuzhiyun 	}
1118*4882a593Smuzhiyun 	err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
1119*4882a593Smuzhiyun 	if (err) {
1120*4882a593Smuzhiyun 		en_err(priv, "Failed allocating drop qp\n");
1121*4882a593Smuzhiyun 		mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
1122*4882a593Smuzhiyun 		return err;
1123*4882a593Smuzhiyun 	}
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 	return 0;
1126*4882a593Smuzhiyun }
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv)
1129*4882a593Smuzhiyun {
1130*4882a593Smuzhiyun 	u32 qpn;
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 	qpn = priv->drop_qp.qpn;
1133*4882a593Smuzhiyun 	mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp);
1134*4882a593Smuzhiyun 	mlx4_qp_free(priv->mdev->dev, &priv->drop_qp);
1135*4882a593Smuzhiyun 	mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
1136*4882a593Smuzhiyun }
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun /* Allocate RX QPs and configure them according to the RSS map */
1139*4882a593Smuzhiyun int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
1140*4882a593Smuzhiyun {
1141*4882a593Smuzhiyun 	struct mlx4_en_dev *mdev = priv->mdev;
1142*4882a593Smuzhiyun 	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
1143*4882a593Smuzhiyun 	struct mlx4_qp_context context;
1144*4882a593Smuzhiyun 	struct mlx4_rss_context *rss_context;
1145*4882a593Smuzhiyun 	int rss_rings;
1146*4882a593Smuzhiyun 	void *ptr;
1147*4882a593Smuzhiyun 	u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
1148*4882a593Smuzhiyun 			MLX4_RSS_TCP_IPV6);
1149*4882a593Smuzhiyun 	int i, qpn;
1150*4882a593Smuzhiyun 	int err = 0;
1151*4882a593Smuzhiyun 	int good_qps = 0;
1152*4882a593Smuzhiyun 	u8 flags;
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun 	en_dbg(DRV, priv, "Configuring rss steering\n");
1155*4882a593Smuzhiyun 
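	/* Reserve one QPN per RX ring as a single contiguous range aligned to
	 * rx_ring_num; RSS picks a ring by offsetting the hash result from
	 * base_qpn, so the range must be contiguous and naturally aligned.
	 */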
1156*4882a593Smuzhiyun 	flags = priv->rx_ring_num == 1 ? MLX4_RESERVE_A0_QP : 0;
1157*4882a593Smuzhiyun 	err = mlx4_qp_reserve_range(mdev->dev, priv->rx_ring_num,
1158*4882a593Smuzhiyun 				    priv->rx_ring_num,
1159*4882a593Smuzhiyun 				    &rss_map->base_qpn, flags,
1160*4882a593Smuzhiyun 				    MLX4_RES_USAGE_DRIVER);
1161*4882a593Smuzhiyun 	if (err) {
1162*4882a593Smuzhiyun 		en_err(priv, "Failed reserving %d qps\n", priv->rx_ring_num);
1163*4882a593Smuzhiyun 		return err;
1164*4882a593Smuzhiyun 	}
1165*4882a593Smuzhiyun 
1166*4882a593Smuzhiyun 	for (i = 0; i < priv->rx_ring_num; i++) {
1167*4882a593Smuzhiyun 		qpn = rss_map->base_qpn + i;
1168*4882a593Smuzhiyun 		err = mlx4_en_config_rss_qp(priv, qpn, priv->rx_ring[i],
1169*4882a593Smuzhiyun 					    &rss_map->state[i],
1170*4882a593Smuzhiyun 					    &rss_map->qps[i]);
1171*4882a593Smuzhiyun 		if (err)
1172*4882a593Smuzhiyun 			goto rss_err;
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 		++good_qps;
1175*4882a593Smuzhiyun 	}
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 	if (priv->rx_ring_num == 1) {
1178*4882a593Smuzhiyun 		rss_map->indir_qp = &rss_map->qps[0];
1179*4882a593Smuzhiyun 		priv->base_qpn = rss_map->indir_qp->qpn;
1180*4882a593Smuzhiyun 		en_info(priv, "Optimized Non-RSS steering\n");
1181*4882a593Smuzhiyun 		return 0;
1182*4882a593Smuzhiyun 	}
1183*4882a593Smuzhiyun 
1184*4882a593Smuzhiyun 	rss_map->indir_qp = kzalloc(sizeof(*rss_map->indir_qp), GFP_KERNEL);
1185*4882a593Smuzhiyun 	if (!rss_map->indir_qp) {
1186*4882a593Smuzhiyun 		err = -ENOMEM;
1187*4882a593Smuzhiyun 		goto rss_err;
1188*4882a593Smuzhiyun 	}
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	/* Configure RSS indirection qp */
1191*4882a593Smuzhiyun 	err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
1192*4882a593Smuzhiyun 	if (err) {
1193*4882a593Smuzhiyun 		en_err(priv, "Failed to allocate RSS indirection QP\n");
1194*4882a593Smuzhiyun 		goto qp_alloc_err;
1195*4882a593Smuzhiyun 	}
1196*4882a593Smuzhiyun 
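	/* The indirection (RSS parent) QP carries no traffic itself; its QP
	 * context, filled below with the RSS fields embedded in the pri_path
	 * area, spreads received packets across the per-ring QPs created above.
	 */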
1197*4882a593Smuzhiyun 	rss_map->indir_qp->event = mlx4_en_sqp_event;
1198*4882a593Smuzhiyun 	mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
1199*4882a593Smuzhiyun 				priv->rx_ring[0]->cqn, -1, &context);
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
1202*4882a593Smuzhiyun 		rss_rings = priv->rx_ring_num;
1203*4882a593Smuzhiyun 	else
1204*4882a593Smuzhiyun 		rss_rings = priv->prof->rss_rings;
1205*4882a593Smuzhiyun 
1206*4882a593Smuzhiyun 	ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
1207*4882a593Smuzhiyun 					+ MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
1208*4882a593Smuzhiyun 	rss_context = ptr;
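	/* In the RSS context, the top byte of base_qpn encodes log2 of the
	 * number of RSS rings (ilog2 floors to a power of two) and the low
	 * 24 bits hold the first QPN of the reserved range.
	 */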
1209*4882a593Smuzhiyun 	rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
1210*4882a593Smuzhiyun 					    (rss_map->base_qpn));
1211*4882a593Smuzhiyun 	rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
1212*4882a593Smuzhiyun 	if (priv->mdev->profile.udp_rss) {
1213*4882a593Smuzhiyun 		rss_mask |= MLX4_RSS_UDP_IPV4 | MLX4_RSS_UDP_IPV6;
1214*4882a593Smuzhiyun 		rss_context->base_qpn_udp = rss_context->default_qpn;
1215*4882a593Smuzhiyun 	}
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 	if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
1218*4882a593Smuzhiyun 		en_info(priv, "Setting RSS context tunnel type to RSS on inner headers\n");
1219*4882a593Smuzhiyun 		rss_mask |= MLX4_RSS_BY_INNER_HEADERS;
1220*4882a593Smuzhiyun 	}
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 	rss_context->flags = rss_mask;
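	/* Default to Toeplitz; the 40-byte RSS key is only programmed for
	 * Toeplitz below, since the XOR hash function does not use it.
	 */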
1223*4882a593Smuzhiyun 	rss_context->hash_fn = MLX4_RSS_HASH_TOP;
1224*4882a593Smuzhiyun 	if (priv->rss_hash_fn == ETH_RSS_HASH_XOR) {
1225*4882a593Smuzhiyun 		rss_context->hash_fn = MLX4_RSS_HASH_XOR;
1226*4882a593Smuzhiyun 	} else if (priv->rss_hash_fn == ETH_RSS_HASH_TOP) {
1227*4882a593Smuzhiyun 		rss_context->hash_fn = MLX4_RSS_HASH_TOP;
1228*4882a593Smuzhiyun 		memcpy(rss_context->rss_key, priv->rss_key,
1229*4882a593Smuzhiyun 		       MLX4_EN_RSS_KEY_SIZE);
1230*4882a593Smuzhiyun 	} else {
1231*4882a593Smuzhiyun 		en_err(priv, "Unknown RSS hash function requested\n");
1232*4882a593Smuzhiyun 		err = -EINVAL;
1233*4882a593Smuzhiyun 		goto indir_err;
1234*4882a593Smuzhiyun 	}
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,
1237*4882a593Smuzhiyun 			       rss_map->indir_qp, &rss_map->indir_state);
1238*4882a593Smuzhiyun 	if (err)
1239*4882a593Smuzhiyun 		goto indir_err;
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun 	return 0;
1242*4882a593Smuzhiyun 
1243*4882a593Smuzhiyun indir_err:
1244*4882a593Smuzhiyun 	mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
1245*4882a593Smuzhiyun 		       MLX4_QP_STATE_RST, NULL, 0, 0, rss_map->indir_qp);
1246*4882a593Smuzhiyun 	mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
1247*4882a593Smuzhiyun 	mlx4_qp_free(mdev->dev, rss_map->indir_qp);
1248*4882a593Smuzhiyun qp_alloc_err:
1249*4882a593Smuzhiyun 	kfree(rss_map->indir_qp);
1250*4882a593Smuzhiyun 	rss_map->indir_qp = NULL;
1251*4882a593Smuzhiyun rss_err:
1252*4882a593Smuzhiyun 	for (i = 0; i < good_qps; i++) {
1253*4882a593Smuzhiyun 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
1254*4882a593Smuzhiyun 			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
1255*4882a593Smuzhiyun 		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
1256*4882a593Smuzhiyun 		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
1257*4882a593Smuzhiyun 	}
1258*4882a593Smuzhiyun 	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
1259*4882a593Smuzhiyun 	return err;
1260*4882a593Smuzhiyun }
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv)
1263*4882a593Smuzhiyun {
1264*4882a593Smuzhiyun 	struct mlx4_en_dev *mdev = priv->mdev;
1265*4882a593Smuzhiyun 	struct mlx4_en_rss_map *rss_map = &priv->rss_map;
1266*4882a593Smuzhiyun 	int i;
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	if (priv->rx_ring_num > 1) {
1269*4882a593Smuzhiyun 		mlx4_qp_modify(mdev->dev, NULL, rss_map->indir_state,
1270*4882a593Smuzhiyun 			       MLX4_QP_STATE_RST, NULL, 0, 0,
1271*4882a593Smuzhiyun 			       rss_map->indir_qp);
1272*4882a593Smuzhiyun 		mlx4_qp_remove(mdev->dev, rss_map->indir_qp);
1273*4882a593Smuzhiyun 		mlx4_qp_free(mdev->dev, rss_map->indir_qp);
1274*4882a593Smuzhiyun 		kfree(rss_map->indir_qp);
1275*4882a593Smuzhiyun 		rss_map->indir_qp = NULL;
1276*4882a593Smuzhiyun 	}
1277*4882a593Smuzhiyun 
1278*4882a593Smuzhiyun 	for (i = 0; i < priv->rx_ring_num; i++) {
1279*4882a593Smuzhiyun 		mlx4_qp_modify(mdev->dev, NULL, rss_map->state[i],
1280*4882a593Smuzhiyun 			       MLX4_QP_STATE_RST, NULL, 0, 0, &rss_map->qps[i]);
1281*4882a593Smuzhiyun 		mlx4_qp_remove(mdev->dev, &rss_map->qps[i]);
1282*4882a593Smuzhiyun 		mlx4_qp_free(mdev->dev, &rss_map->qps[i]);
1283*4882a593Smuzhiyun 	}
1284*4882a593Smuzhiyun 	mlx4_qp_release_range(mdev->dev, rss_map->base_qpn, priv->rx_ring_num);
1285*4882a593Smuzhiyun }
1286