xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/intel/fm10k/fm10k_netdev.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2019 Intel Corporation. */

#include "fm10k.h"
#include <linux/vmalloc.h>
#include <net/udp_tunnel.h>
#include <linux/if_macvlan.h>

/**
 * fm10k_setup_tx_resources - allocate Tx resources (Descriptors)
 * @tx_ring:    tx descriptor ring (for a specific queue) to setup
 *
 * Return 0 on success, negative on failure
 **/
int fm10k_setup_tx_resources(struct fm10k_ring *tx_ring)
{
	struct device *dev = tx_ring->dev;
	int size;

	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;

	tx_ring->tx_buffer = vzalloc(size);
	if (!tx_ring->tx_buffer)
		goto err;

	u64_stats_init(&tx_ring->syncp);

	/* round up to nearest 4K */
	tx_ring->size = tx_ring->count * sizeof(struct fm10k_tx_desc);
	tx_ring->size = ALIGN(tx_ring->size, 4096);

	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
					   &tx_ring->dma, GFP_KERNEL);
	if (!tx_ring->desc)
		goto err;

	return 0;

err:
	vfree(tx_ring->tx_buffer);
	tx_ring->tx_buffer = NULL;
	return -ENOMEM;
}

/**
 * fm10k_setup_all_tx_resources - allocate all queues Tx resources
 * @interface: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int fm10k_setup_all_tx_resources(struct fm10k_intfc *interface)
{
	int i, err;

	for (i = 0; i < interface->num_tx_queues; i++) {
		err = fm10k_setup_tx_resources(interface->tx_ring[i]);
		if (!err)
			continue;

		netif_err(interface, probe, interface->netdev,
			  "Allocation for Tx Queue %u failed\n", i);
		goto err_setup_tx;
	}

	return 0;
err_setup_tx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		fm10k_free_tx_resources(interface->tx_ring[i]);
	return err;
}

/**
 * fm10k_setup_rx_resources - allocate Rx resources (Descriptors)
 * @rx_ring:    rx descriptor ring (for a specific queue) to setup
 *
 * Returns 0 on success, negative on failure
 **/
int fm10k_setup_rx_resources(struct fm10k_ring *rx_ring)
{
	struct device *dev = rx_ring->dev;
	int size;

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;

	rx_ring->rx_buffer = vzalloc(size);
	if (!rx_ring->rx_buffer)
		goto err;

	u64_stats_init(&rx_ring->syncp);

	/* Round up to nearest 4K */
	rx_ring->size = rx_ring->count * sizeof(union fm10k_rx_desc);
	rx_ring->size = ALIGN(rx_ring->size, 4096);

	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
					   &rx_ring->dma, GFP_KERNEL);
	if (!rx_ring->desc)
		goto err;

	return 0;
err:
	vfree(rx_ring->rx_buffer);
	rx_ring->rx_buffer = NULL;
	return -ENOMEM;
}

/**
 * fm10k_setup_all_rx_resources - allocate all queues Rx resources
 * @interface: board private structure
 *
 * If this function returns with an error, then it's possible one or
 * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
 *
 * Return 0 on success, negative on failure
 **/
static int fm10k_setup_all_rx_resources(struct fm10k_intfc *interface)
{
	int i, err;

	for (i = 0; i < interface->num_rx_queues; i++) {
		err = fm10k_setup_rx_resources(interface->rx_ring[i]);
		if (!err)
			continue;

		netif_err(interface, probe, interface->netdev,
			  "Allocation for Rx Queue %u failed\n", i);
		goto err_setup_rx;
	}

	return 0;
err_setup_rx:
	/* rewind the index freeing the rings as we go */
	while (i--)
		fm10k_free_rx_resources(interface->rx_ring[i]);
	return err;
}

void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *ring,
				      struct fm10k_tx_buffer *tx_buffer)
{
	if (tx_buffer->skb) {
		dev_kfree_skb_any(tx_buffer->skb);
		if (dma_unmap_len(tx_buffer, len))
			dma_unmap_single(ring->dev,
					 dma_unmap_addr(tx_buffer, dma),
					 dma_unmap_len(tx_buffer, len),
					 DMA_TO_DEVICE);
	} else if (dma_unmap_len(tx_buffer, len)) {
		dma_unmap_page(ring->dev,
			       dma_unmap_addr(tx_buffer, dma),
			       dma_unmap_len(tx_buffer, len),
			       DMA_TO_DEVICE);
	}
	tx_buffer->next_to_watch = NULL;
	tx_buffer->skb = NULL;
	dma_unmap_len_set(tx_buffer, len, 0);
	/* tx_buffer must be completely set up in the transmit path */
}

/**
 * fm10k_clean_tx_ring - Free Tx Buffers
 * @tx_ring: ring to be cleaned
 **/
static void fm10k_clean_tx_ring(struct fm10k_ring *tx_ring)
{
	unsigned long size;
	u16 i;

	/* ring already cleared, nothing to do */
	if (!tx_ring->tx_buffer)
		return;

	/* Free all the Tx ring sk_buffs */
	for (i = 0; i < tx_ring->count; i++) {
		struct fm10k_tx_buffer *tx_buffer = &tx_ring->tx_buffer[i];

		fm10k_unmap_and_free_tx_resource(tx_ring, tx_buffer);
	}

	/* reset BQL values */
	netdev_tx_reset_queue(txring_txq(tx_ring));

	size = sizeof(struct fm10k_tx_buffer) * tx_ring->count;
	memset(tx_ring->tx_buffer, 0, size);

	/* Zero out the descriptor ring */
	memset(tx_ring->desc, 0, tx_ring->size);
}

/**
 * fm10k_free_tx_resources - Free Tx Resources per Queue
 * @tx_ring: Tx descriptor ring for a specific queue
 *
 * Free all transmit software resources
 **/
void fm10k_free_tx_resources(struct fm10k_ring *tx_ring)
{
	fm10k_clean_tx_ring(tx_ring);

	vfree(tx_ring->tx_buffer);
	tx_ring->tx_buffer = NULL;

	/* if not set, then don't free */
	if (!tx_ring->desc)
		return;

	dma_free_coherent(tx_ring->dev, tx_ring->size,
			  tx_ring->desc, tx_ring->dma);
	tx_ring->desc = NULL;
}

/**
 * fm10k_clean_all_tx_rings - Free Tx Buffers for all queues
 * @interface: board private structure
 **/
void fm10k_clean_all_tx_rings(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_tx_queues; i++)
		fm10k_clean_tx_ring(interface->tx_ring[i]);
}

/**
 * fm10k_free_all_tx_resources - Free Tx Resources for All Queues
 * @interface: board private structure
 *
 * Free all transmit software resources
 **/
static void fm10k_free_all_tx_resources(struct fm10k_intfc *interface)
{
	int i = interface->num_tx_queues;

	while (i--)
		fm10k_free_tx_resources(interface->tx_ring[i]);
}

/**
 * fm10k_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: ring to free buffers from
 **/
static void fm10k_clean_rx_ring(struct fm10k_ring *rx_ring)
{
	unsigned long size;
	u16 i;

	if (!rx_ring->rx_buffer)
		return;

	dev_kfree_skb(rx_ring->skb);
	rx_ring->skb = NULL;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		struct fm10k_rx_buffer *buffer = &rx_ring->rx_buffer[i];
		/* clean-up will only set page pointer to NULL */
		if (!buffer->page)
			continue;

		dma_unmap_page(rx_ring->dev, buffer->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(buffer->page);

		buffer->page = NULL;
	}

	size = sizeof(struct fm10k_rx_buffer) * rx_ring->count;
	memset(rx_ring->rx_buffer, 0, size);

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_alloc = 0;
	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
}

/**
 * fm10k_free_rx_resources - Free Rx Resources
 * @rx_ring: ring to clean the resources from
 *
 * Free all receive software resources
 **/
void fm10k_free_rx_resources(struct fm10k_ring *rx_ring)
{
	fm10k_clean_rx_ring(rx_ring);

	vfree(rx_ring->rx_buffer);
	rx_ring->rx_buffer = NULL;

	/* if not set, then don't free */
	if (!rx_ring->desc)
		return;

	dma_free_coherent(rx_ring->dev, rx_ring->size,
			  rx_ring->desc, rx_ring->dma);

	rx_ring->desc = NULL;
}

/**
 * fm10k_clean_all_rx_rings - Free Rx Buffers for all queues
 * @interface: board private structure
 **/
void fm10k_clean_all_rx_rings(struct fm10k_intfc *interface)
{
	int i;

	for (i = 0; i < interface->num_rx_queues; i++)
		fm10k_clean_rx_ring(interface->rx_ring[i]);
}

/**
 * fm10k_free_all_rx_resources - Free Rx Resources for All Queues
 * @interface: board private structure
 *
 * Free all receive software resources
 **/
static void fm10k_free_all_rx_resources(struct fm10k_intfc *interface)
{
	int i = interface->num_rx_queues;

	while (i--)
		fm10k_free_rx_resources(interface->rx_ring[i]);
}

/**
 * fm10k_request_glort_range - Request GLORTs for use in configuring rules
 * @interface: board private structure
 *
 * This function allocates a range of glorts for this interface to use.
 **/
static void fm10k_request_glort_range(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
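	/* dglort_map appears to pack the GLORT base in its low bits and an
	 * inverted range mask in its high bits; recover the range size here
	 */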
	u16 mask = (~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT;

	/* establish GLORT base */
	interface->glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	interface->glort_count = 0;

	/* nothing we can do until mask is allocated */
	if (hw->mac.dglort_map == FM10K_DGLORTMAP_NONE)
		return;

	/* we support 3 possible GLORT configurations.
	 * 1: VFs consume all but the last 1
	 * 2: VFs and PF split glorts with possible gap between
	 * 3: VFs allocated first 64, all others belong to PF
	 */
	if (mask <= hw->iov.total_vfs) {
		interface->glort_count = 1;
		interface->glort += mask;
	} else if (mask < 64) {
		interface->glort_count = (mask + 1) / 2;
		interface->glort += interface->glort_count;
	} else {
		interface->glort_count = mask - 63;
		interface->glort += 64;
	}
}

/**
 * fm10k_restore_udp_port_info - restore UDP tunnel port configuration
 * @interface: board private structure
 *
 * This function restores the value in the tunnel_cfg register(s) after reset
 **/
static void fm10k_restore_udp_port_info(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	/* only the PF supports configuring tunnels */
	if (hw->mac.type != fm10k_mac_pf)
		return;

	/* restore tunnel configuration register */
	fm10k_write_reg(hw, FM10K_TUNNEL_CFG,
			ntohs(interface->vxlan_port) |
			(ETH_P_TEB << FM10K_TUNNEL_CFG_NVGRE_SHIFT));

	/* restore Geneve tunnel configuration register */
	fm10k_write_reg(hw, FM10K_TUNNEL_CFG_GENEVE,
			ntohs(interface->geneve_port));
}

/**
 * fm10k_udp_tunnel_sync - Called when UDP tunnel ports change
 * @dev: network interface device structure
 * @table: Tunnel table (according to tables of @fm10k_udp_tunnels)
 *
 * This function is called when a new UDP tunnel port is added or deleted.
 * Due to hardware restrictions, only one port per type can be offloaded at
 * once. Core will send to the driver a port of its choice.
 **/
static int fm10k_udp_tunnel_sync(struct net_device *dev, unsigned int table)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct udp_tunnel_info ti;

	udp_tunnel_nic_get_port(dev, table, 0, &ti);
	if (!table)
		interface->vxlan_port = ti.port;
	else
		interface->geneve_port = ti.port;

	fm10k_restore_udp_port_info(interface);
	return 0;
}

static const struct udp_tunnel_nic_info fm10k_udp_tunnels = {
	.sync_table	= fm10k_udp_tunnel_sync,
	.tables		= {
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,  },
		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_GENEVE, },
	},
};

/**
 * fm10k_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 **/
int fm10k_open(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	int err;

	/* allocate transmit descriptors */
	err = fm10k_setup_all_tx_resources(interface);
	if (err)
		goto err_setup_tx;

	/* allocate receive descriptors */
	err = fm10k_setup_all_rx_resources(interface);
	if (err)
		goto err_setup_rx;

	/* allocate interrupt resources */
	err = fm10k_qv_request_irq(interface);
	if (err)
		goto err_req_irq;

	/* setup GLORT assignment for this port */
	fm10k_request_glort_range(interface);

	/* Notify the stack of the actual queue counts */
	err = netif_set_real_num_tx_queues(netdev,
					   interface->num_tx_queues);
	if (err)
		goto err_set_queues;

	err = netif_set_real_num_rx_queues(netdev,
					   interface->num_rx_queues);
	if (err)
		goto err_set_queues;

	fm10k_up(interface);

	return 0;

err_set_queues:
	fm10k_qv_free_irq(interface);
err_req_irq:
	fm10k_free_all_rx_resources(interface);
err_setup_rx:
	fm10k_free_all_tx_resources(interface);
err_setup_tx:
	return err;
}

/**
 * fm10k_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the driver's control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 **/
int fm10k_close(struct net_device *netdev)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);

	fm10k_down(interface);

	fm10k_qv_free_irq(interface);

	fm10k_free_all_tx_resources(interface);
	fm10k_free_all_rx_resources(interface);

	return 0;
}

static netdev_tx_t fm10k_xmit_frame(struct sk_buff *skb, struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
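	/* snapshot the queue count once since it can change underneath us
	 * (presumably across a reset); READ_ONCE prevents a re-read
	 */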
	int num_tx_queues = READ_ONCE(interface->num_tx_queues);
	unsigned int r_idx = skb->queue_mapping;
	int err;

	if (!num_tx_queues)
		return NETDEV_TX_BUSY;

	if ((skb->protocol == htons(ETH_P_8021Q)) &&
	    !skb_vlan_tag_present(skb)) {
		/* FM10K only supports hardware tagging, any tags in frame
		 * are considered 2nd level or "outer" tags
		 */
		struct vlan_hdr *vhdr;
		__be16 proto;

		/* make sure skb is not shared */
		skb = skb_share_check(skb, GFP_ATOMIC);
		if (!skb)
			return NETDEV_TX_OK;

		/* make sure there is enough room to move the ethernet header */
		if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
			return NETDEV_TX_OK;

		/* verify the skb head is not shared */
		err = skb_cow_head(skb, 0);
		if (err) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}

		/* locate VLAN header */
		vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);

		/* pull the 2 key pieces of data out of it */
		__vlan_hwaccel_put_tag(skb,
				       htons(ETH_P_8021Q),
				       ntohs(vhdr->h_vlan_TCI));
		proto = vhdr->h_vlan_encapsulated_proto;
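		/* per 802.3, a value below 1536 (0x600) here is a length
		 * field rather than an EtherType, so treat it as 802.2/LLC
		 */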
		skb->protocol = (ntohs(proto) >= 1536) ? proto :
							 htons(ETH_P_802_2);

		/* squash it by moving the ethernet addresses up 4 bytes */
		memmove(skb->data + VLAN_HLEN, skb->data, 12);
		__skb_pull(skb, VLAN_HLEN);
		skb_reset_mac_header(skb);
	}

	/* The minimum packet size for a single buffer is 17B so pad the skb
	 * in order to meet this minimum size requirement.
	 */
	if (unlikely(skb->len < 17)) {
		int pad_len = 17 - skb->len;

		if (skb_pad(skb, pad_len))
			return NETDEV_TX_OK;
		__skb_put(skb, pad_len);
	}

	if (r_idx >= num_tx_queues)
		r_idx %= num_tx_queues;

	err = fm10k_xmit_frame_ring(skb, interface->tx_ring[r_idx]);

	return err;
}

/**
 * fm10k_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: the index of the Tx queue that timed out
 **/
static void fm10k_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_ring *tx_ring;
	bool real_tx_hang = false;

	if (txqueue >= interface->num_tx_queues) {
		WARN(1, "invalid Tx queue index %u", txqueue);
		return;
	}

	tx_ring = interface->tx_ring[txqueue];
	if (check_for_tx_hang(tx_ring) && fm10k_check_tx_hang(tx_ring))
		real_tx_hang = true;

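/* cap (in jiffies) for the doubling watchdog timeout below */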
#define TX_TIMEO_LIMIT 16000
	if (real_tx_hang) {
		fm10k_tx_timeout_reset(interface);
	} else {
		netif_info(interface, drv, netdev,
			   "Fake Tx hang detected with timeout of %d seconds\n",
			   netdev->watchdog_timeo / HZ);

		/* fake Tx hang - increase the kernel timeout */
		if (netdev->watchdog_timeo < TX_TIMEO_LIMIT)
			netdev->watchdog_timeo *= 2;
	}
}

/**
 * fm10k_host_mbx_ready - Check PF interface's mailbox readiness
 * @interface: board private structure
 *
 * This function checks if the PF interface's mailbox is ready before queueing
 * mailbox messages for transmission. This will prevent filling the TX mailbox
 * queue when the receiver is not ready. VF interfaces are exempt from this
 * check since it will block all PF-VF mailbox messages from being sent from
 * the VF to the PF at initialization.
 **/
static bool fm10k_host_mbx_ready(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;

	return (hw->mac.type == fm10k_mac_vf || interface->host_ready);
}

/**
 * fm10k_queue_vlan_request - Queue a VLAN update request
 * @interface: the fm10k interface structure
 * @vid: the VLAN vid
 * @vsi: VSI index number
 * @set: whether to set or clear
 *
 * This function queues up a VLAN update. For VFs, this must be sent to the
 * managing PF over the mailbox. For PFs, we'll use the same handling so that
 * it's similar to the VF. This avoids storming the PF<->VF mailbox with too
 * many VLAN updates during reset.
 */
int fm10k_queue_vlan_request(struct fm10k_intfc *interface,
			     u32 vid, u8 vsi, bool set)
{
	struct fm10k_macvlan_request *request;
	unsigned long flags;

	/* This must be atomic since we may be called while the netdev
	 * addr_list_lock is held
	 */
	request = kzalloc(sizeof(*request), GFP_ATOMIC);
	if (!request)
		return -ENOMEM;

	request->type = FM10K_VLAN_REQUEST;
	request->vlan.vid = vid;
	request->vlan.vsi = vsi;
	request->set = set;

	spin_lock_irqsave(&interface->macvlan_lock, flags);
	list_add_tail(&request->list, &interface->macvlan_requests);
	spin_unlock_irqrestore(&interface->macvlan_lock, flags);

	fm10k_macvlan_schedule(interface);

	return 0;
}

/**
 * fm10k_queue_mac_request - Queue a MAC update request
 * @interface: the fm10k interface structure
 * @glort: the target glort for this update
 * @addr: the address to update
 * @vid: the vid to update
 * @set: whether to add or remove
 *
 * This function queues up a MAC request for sending to the switch manager.
 * A separate thread monitors the queue and sends updates to the switch
 * manager. Return 0 on success, and negative error code on failure.
 **/
int fm10k_queue_mac_request(struct fm10k_intfc *interface, u16 glort,
			    const unsigned char *addr, u16 vid, bool set)
{
	struct fm10k_macvlan_request *request;
	unsigned long flags;

	/* This must be atomic since we may be called while the netdev
	 * addr_list_lock is held
	 */
	request = kzalloc(sizeof(*request), GFP_ATOMIC);
	if (!request)
		return -ENOMEM;

	if (is_multicast_ether_addr(addr))
		request->type = FM10K_MC_MAC_REQUEST;
	else
		request->type = FM10K_UC_MAC_REQUEST;

	ether_addr_copy(request->mac.addr, addr);
	request->mac.glort = glort;
	request->mac.vid = vid;
	request->set = set;

	spin_lock_irqsave(&interface->macvlan_lock, flags);
	list_add_tail(&request->list, &interface->macvlan_requests);
	spin_unlock_irqrestore(&interface->macvlan_lock, flags);

	fm10k_macvlan_schedule(interface);

	return 0;
}

/**
 * fm10k_clear_macvlan_queue - Cancel pending updates for a given glort
 * @interface: the fm10k interface structure
 * @glort: the target glort to clear
 * @vlans: true to clear VLAN messages, false to ignore them
 *
 * Cancel any outstanding MAC/VLAN requests for a given glort. This is
 * expected to be called when a logical port goes down.
 **/
void fm10k_clear_macvlan_queue(struct fm10k_intfc *interface,
			       u16 glort, bool vlans)
{
	struct fm10k_macvlan_request *r, *tmp;
	unsigned long flags;

	spin_lock_irqsave(&interface->macvlan_lock, flags);

	/* Free any outstanding MAC/VLAN requests for this interface */
	list_for_each_entry_safe(r, tmp, &interface->macvlan_requests, list) {
		switch (r->type) {
		case FM10K_MC_MAC_REQUEST:
		case FM10K_UC_MAC_REQUEST:
			/* Don't free requests for other interfaces */
			if (r->mac.glort != glort)
				break;
			fallthrough;
		case FM10K_VLAN_REQUEST:
			if (vlans) {
				list_del(&r->list);
				kfree(r);
			}
			break;
		}
	}

	spin_unlock_irqrestore(&interface->macvlan_lock, flags);
}

static int fm10k_uc_vlan_unsync(struct net_device *netdev,
				const unsigned char *uc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	u16 glort = interface->glort;
	u16 vid = interface->vid;
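	/* interface->vid carries the set/clear flag in the bit above the
	 * 12-bit VLAN range (see fm10k_update_vid()); split it back out
	 */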
	bool set = !!(vid / VLAN_N_VID);
	int err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = fm10k_queue_mac_request(interface, glort, uc_addr, vid, set);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}

static int fm10k_mc_vlan_unsync(struct net_device *netdev,
				const unsigned char *mc_addr)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	u16 glort = interface->glort;
	u16 vid = interface->vid;
	bool set = !!(vid / VLAN_N_VID);
	int err;

	/* drop any leading bits on the VLAN ID */
	vid &= VLAN_N_VID - 1;

	err = fm10k_queue_mac_request(interface, glort, mc_addr, vid, set);
	if (err)
		return err;

	/* return non-zero value as we are only doing a partial sync/unsync */
	return 1;
}

static int fm10k_update_vid(struct net_device *netdev, u16 vid, bool set)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_l2_accel *l2_accel = interface->l2_accel;
	struct fm10k_hw *hw = &interface->hw;
	u16 glort;
	s32 err;
	int i;

	/* updates do not apply to VLAN 0 */
	if (!vid)
		return 0;

	if (vid >= VLAN_N_VID)
		return -EINVAL;

	/* Verify that we have permission to add VLANs. If this is a request
	 * to remove a VLAN, we still want to allow the user to remove the
	 * VLAN device. In that case, we need to clear the bit in the
	 * active_vlans bitmask.
	 */
	if (set && hw->mac.vlan_override)
		return -EACCES;

	/* update active_vlans bitmask */
	set_bit(vid, interface->active_vlans);
	if (!set)
		clear_bit(vid, interface->active_vlans);

	/* disable the default VLAN ID on ring if we have an active VLAN */
	for (i = 0; i < interface->num_rx_queues; i++) {
		struct fm10k_ring *rx_ring = interface->rx_ring[i];
		u16 rx_vid = rx_ring->vid & (VLAN_N_VID - 1);

		if (test_bit(rx_vid, interface->active_vlans))
			rx_ring->vid |= FM10K_VLAN_CLEAR;
		else
			rx_ring->vid &= ~FM10K_VLAN_CLEAR;
	}

	/* If our VLAN has been overridden, there is no reason to send VLAN
	 * removal requests as they will be silently ignored.
	 */
	if (hw->mac.vlan_override)
		return 0;

	/* Do not remove default VLAN ID related entries from VLAN and MAC
	 * tables
	 */
	if (!set && vid == hw->mac.default_vid)
		return 0;

	/* Do not throw an error if the interface is down. We will sync once
	 * we come up
	 */
	if (test_bit(__FM10K_DOWN, interface->state))
		return 0;

	fm10k_mbx_lock(interface);

	/* only need to update the VLAN if not in promiscuous mode */
	if (!(netdev->flags & IFF_PROMISC)) {
		err = fm10k_queue_vlan_request(interface, vid, 0, set);
		if (err)
			goto err_out;
	}

	/* Update our base MAC address */
	err = fm10k_queue_mac_request(interface, interface->glort,
				      hw->mac.addr, vid, set);
	if (err)
		goto err_out;

	/* Update L2 accelerated macvlan addresses */
	if (l2_accel) {
		for (i = 0; i < l2_accel->size; i++) {
			struct net_device *sdev = l2_accel->macvlan[i];

			if (!sdev)
				continue;

			glort = l2_accel->dglort + 1 + i;

			fm10k_queue_mac_request(interface, glort,
						sdev->dev_addr,
						vid, set);
		}
	}

	/* set VLAN ID prior to syncing/unsyncing the VLAN */
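	/* the set flag is encoded above the 12-bit VID so that the unsync
	 * callbacks invoked below can recover both pieces
	 */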
	interface->vid = vid + (set ? VLAN_N_VID : 0);

	/* Update the unicast and multicast address list to add/drop VLAN */
	__dev_uc_unsync(netdev, fm10k_uc_vlan_unsync);
	__dev_mc_unsync(netdev, fm10k_mc_vlan_unsync);

err_out:
	fm10k_mbx_unlock(interface);

	return err;
}

static int fm10k_vlan_rx_add_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, true);
}

static int fm10k_vlan_rx_kill_vid(struct net_device *netdev,
				  __always_unused __be16 proto, u16 vid)
{
	/* update VLAN and address table based on changes */
	return fm10k_update_vid(netdev, vid, false);
}

static u16 fm10k_find_next_vlan(struct fm10k_intfc *interface, u16 vid)
{
	struct fm10k_hw *hw = &interface->hw;
	u16 default_vid = hw->mac.default_vid;
	u16 vid_limit = vid < default_vid ? default_vid : VLAN_N_VID;

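	/* cap the first search at default_vid so the iteration never
	 * returns the default VID itself
	 */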
	vid = find_next_bit(interface->active_vlans, vid_limit, ++vid);

	return vid;
}

static void fm10k_clear_unused_vlans(struct fm10k_intfc *interface)
{
	u32 vid, prev_vid;

	/* loop through and find any gaps in the table */
	for (vid = 0, prev_vid = 0;
	     prev_vid < VLAN_N_VID;
	     prev_vid = vid + 1, vid = fm10k_find_next_vlan(interface, vid)) {
		if (prev_vid == vid)
			continue;

		/* send request to clear multiple bits at a time */
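		/* (the run length, minus one, is packed into the upper bits
		 * of the VID via FM10K_VLAN_LENGTH_SHIFT)
		 */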
		prev_vid += (vid - prev_vid - 1) << FM10K_VLAN_LENGTH_SHIFT;
		fm10k_queue_vlan_request(interface, prev_vid, 0, false);
	}
}

static int __fm10k_uc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_valid_ether_addr(addr))
		return -EADDRNOTAVAIL;

	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = fm10k_queue_mac_request(interface, glort,
					      addr, vid, sync);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_uc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, true);
}

static int fm10k_uc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_uc_sync(dev, addr, false);
}

static int fm10k_set_mac(struct net_device *dev, void *p)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	struct sockaddr *addr = p;
	s32 err = 0;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	if (dev->flags & IFF_UP) {
		/* setting MAC address requires mailbox */
		fm10k_mbx_lock(interface);

		err = fm10k_uc_sync(dev, addr->sa_data);
		if (!err)
			fm10k_uc_unsync(dev, hw->mac.addr);

		fm10k_mbx_unlock(interface);
	}

	if (!err) {
		ether_addr_copy(dev->dev_addr, addr->sa_data);
		ether_addr_copy(hw->mac.addr, addr->sa_data);
		dev->addr_assign_type &= ~NET_ADDR_RANDOM;
	}

	/* if we had a mailbox error suggest trying again */
	return err ? -EAGAIN : 0;
}

static int __fm10k_mc_sync(struct net_device *dev,
			   const unsigned char *addr, bool sync)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	u16 vid, glort = interface->glort;
	s32 err;

	if (!is_multicast_ether_addr(addr))
		return -EADDRNOTAVAIL;

	for (vid = fm10k_find_next_vlan(interface, 0);
	     vid < VLAN_N_VID;
	     vid = fm10k_find_next_vlan(interface, vid)) {
		err = fm10k_queue_mac_request(interface, glort,
					      addr, vid, sync);
		if (err)
			return err;
	}

	return 0;
}

static int fm10k_mc_sync(struct net_device *dev,
			 const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, true);
}

static int fm10k_mc_unsync(struct net_device *dev,
			   const unsigned char *addr)
{
	return __fm10k_mc_sync(dev, addr, false);
}

static void fm10k_set_rx_mode(struct net_device *dev)
{
	struct fm10k_intfc *interface = netdev_priv(dev);
	struct fm10k_hw *hw = &interface->hw;
	int xcast_mode;

	/* no need to update the hardware if we are not running */
1043*4882a593Smuzhiyun 	if (!(dev->flags & IFF_UP))
1044*4882a593Smuzhiyun 		return;
1045*4882a593Smuzhiyun 
1046*4882a593Smuzhiyun 	/* determine new mode based on flags */
1047*4882a593Smuzhiyun 	xcast_mode = (dev->flags & IFF_PROMISC) ? FM10K_XCAST_MODE_PROMISC :
1048*4882a593Smuzhiyun 		     (dev->flags & IFF_ALLMULTI) ? FM10K_XCAST_MODE_ALLMULTI :
1049*4882a593Smuzhiyun 		     (dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) ?
1050*4882a593Smuzhiyun 		     FM10K_XCAST_MODE_MULTI : FM10K_XCAST_MODE_NONE;
1051*4882a593Smuzhiyun 
1052*4882a593Smuzhiyun 	fm10k_mbx_lock(interface);
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 	/* update xcast mode first, but only if it changed */
1055*4882a593Smuzhiyun 	if (interface->xcast_mode != xcast_mode) {
1056*4882a593Smuzhiyun 		/* update VLAN table when entering promiscuous mode */
1057*4882a593Smuzhiyun 		if (xcast_mode == FM10K_XCAST_MODE_PROMISC)
1058*4882a593Smuzhiyun 			fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL,
1059*4882a593Smuzhiyun 						 0, true);
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun 		/* clear VLAN table when exiting promiscuous mode */
1062*4882a593Smuzhiyun 		if (interface->xcast_mode == FM10K_XCAST_MODE_PROMISC)
1063*4882a593Smuzhiyun 			fm10k_clear_unused_vlans(interface);
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun 		/* update xcast mode if host's mailbox is ready */
1066*4882a593Smuzhiyun 		if (fm10k_host_mbx_ready(interface))
1067*4882a593Smuzhiyun 			hw->mac.ops.update_xcast_mode(hw, interface->glort,
1068*4882a593Smuzhiyun 						      xcast_mode);
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun 		/* record updated xcast mode state */
1071*4882a593Smuzhiyun 		interface->xcast_mode = xcast_mode;
1072*4882a593Smuzhiyun 	}
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 	/* synchronize all of the addresses */
1075*4882a593Smuzhiyun 	__dev_uc_sync(dev, fm10k_uc_sync, fm10k_uc_unsync);
1076*4882a593Smuzhiyun 	__dev_mc_sync(dev, fm10k_mc_sync, fm10k_mc_unsync);
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	fm10k_mbx_unlock(interface);
1079*4882a593Smuzhiyun }
1080*4882a593Smuzhiyun 
fm10k_restore_rx_state(struct fm10k_intfc * interface)1081*4882a593Smuzhiyun void fm10k_restore_rx_state(struct fm10k_intfc *interface)
1082*4882a593Smuzhiyun {
1083*4882a593Smuzhiyun 	struct fm10k_l2_accel *l2_accel = interface->l2_accel;
1084*4882a593Smuzhiyun 	struct net_device *netdev = interface->netdev;
1085*4882a593Smuzhiyun 	struct fm10k_hw *hw = &interface->hw;
1086*4882a593Smuzhiyun 	int xcast_mode, i;
1087*4882a593Smuzhiyun 	u16 vid, glort;
1088*4882a593Smuzhiyun 
1089*4882a593Smuzhiyun 	/* record glort for this interface */
1090*4882a593Smuzhiyun 	glort = interface->glort;
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 	/* convert interface flags to xcast mode */
1093*4882a593Smuzhiyun 	if (netdev->flags & IFF_PROMISC)
1094*4882a593Smuzhiyun 		xcast_mode = FM10K_XCAST_MODE_PROMISC;
1095*4882a593Smuzhiyun 	else if (netdev->flags & IFF_ALLMULTI)
1096*4882a593Smuzhiyun 		xcast_mode = FM10K_XCAST_MODE_ALLMULTI;
1097*4882a593Smuzhiyun 	else if (netdev->flags & (IFF_BROADCAST | IFF_MULTICAST))
1098*4882a593Smuzhiyun 		xcast_mode = FM10K_XCAST_MODE_MULTI;
1099*4882a593Smuzhiyun 	else
1100*4882a593Smuzhiyun 		xcast_mode = FM10K_XCAST_MODE_NONE;
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun 	fm10k_mbx_lock(interface);
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 	/* Enable logical port if host's mailbox is ready */
1105*4882a593Smuzhiyun 	if (fm10k_host_mbx_ready(interface))
1106*4882a593Smuzhiyun 		hw->mac.ops.update_lport_state(hw, glort,
1107*4882a593Smuzhiyun 					       interface->glort_count, true);
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 	/* update VLAN table */
1110*4882a593Smuzhiyun 	fm10k_queue_vlan_request(interface, FM10K_VLAN_ALL, 0,
1111*4882a593Smuzhiyun 				 xcast_mode == FM10K_XCAST_MODE_PROMISC);
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun 	/* update table with current entries */
1114*4882a593Smuzhiyun 	for (vid = fm10k_find_next_vlan(interface, 0);
1115*4882a593Smuzhiyun 	     vid < VLAN_N_VID;
1116*4882a593Smuzhiyun 	     vid = fm10k_find_next_vlan(interface, vid)) {
1117*4882a593Smuzhiyun 		fm10k_queue_vlan_request(interface, vid, 0, true);
1118*4882a593Smuzhiyun 
1119*4882a593Smuzhiyun 		fm10k_queue_mac_request(interface, interface->glort,
1120*4882a593Smuzhiyun 					hw->mac.addr, vid, true);
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun 		/* synchronize macvlan addresses */
1123*4882a593Smuzhiyun 		if (l2_accel) {
1124*4882a593Smuzhiyun 			for (i = 0; i < l2_accel->size; i++) {
1125*4882a593Smuzhiyun 				struct net_device *sdev = l2_accel->macvlan[i];
1126*4882a593Smuzhiyun 
1127*4882a593Smuzhiyun 				if (!sdev)
1128*4882a593Smuzhiyun 					continue;
1129*4882a593Smuzhiyun 
1130*4882a593Smuzhiyun 				glort = l2_accel->dglort + 1 + i;
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 				fm10k_queue_mac_request(interface, glort,
1133*4882a593Smuzhiyun 							sdev->dev_addr,
1134*4882a593Smuzhiyun 							vid, true);
1135*4882a593Smuzhiyun 			}
1136*4882a593Smuzhiyun 		}
1137*4882a593Smuzhiyun 	}
1138*4882a593Smuzhiyun 
1139*4882a593Smuzhiyun 	/* update xcast mode before synchronizing addresses if host's mailbox
1140*4882a593Smuzhiyun 	 * is ready; use interface->glort, as 'glort' was reused above for
1141*4882a593Smuzhiyun 	 * station glorts (editorial fix) */
1142*4882a593Smuzhiyun 	if (fm10k_host_mbx_ready(interface))
1143*4882a593Smuzhiyun 		hw->mac.ops.update_xcast_mode(hw, interface->glort, xcast_mode);
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	/* synchronize all of the addresses */
1146*4882a593Smuzhiyun 	__dev_uc_sync(netdev, fm10k_uc_sync, fm10k_uc_unsync);
1147*4882a593Smuzhiyun 	__dev_mc_sync(netdev, fm10k_mc_sync, fm10k_mc_unsync);
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	/* synchronize macvlan addresses */
1150*4882a593Smuzhiyun 	if (l2_accel) {
1151*4882a593Smuzhiyun 		for (i = 0; i < l2_accel->size; i++) {
1152*4882a593Smuzhiyun 			struct net_device *sdev = l2_accel->macvlan[i];
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun 			if (!sdev)
1155*4882a593Smuzhiyun 				continue;
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 			glort = l2_accel->dglort + 1 + i;
1158*4882a593Smuzhiyun 
1159*4882a593Smuzhiyun 			hw->mac.ops.update_xcast_mode(hw, glort,
1160*4882a593Smuzhiyun 						      FM10K_XCAST_MODE_NONE);
1161*4882a593Smuzhiyun 			fm10k_queue_mac_request(interface, glort,
1162*4882a593Smuzhiyun 						sdev->dev_addr,
1163*4882a593Smuzhiyun 						hw->mac.default_vid, true);
1164*4882a593Smuzhiyun 		}
1165*4882a593Smuzhiyun 	}
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	fm10k_mbx_unlock(interface);
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	/* record updated xcast mode state */
1170*4882a593Smuzhiyun 	interface->xcast_mode = xcast_mode;
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	/* Restore tunnel configuration */
1173*4882a593Smuzhiyun 	fm10k_restore_udp_port_info(interface);
1174*4882a593Smuzhiyun }
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun void fm10k_reset_rx_state(struct fm10k_intfc *interface)
1177*4882a593Smuzhiyun {
1178*4882a593Smuzhiyun 	struct net_device *netdev = interface->netdev;
1179*4882a593Smuzhiyun 	struct fm10k_hw *hw = &interface->hw;
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 	/* Wait for MAC/VLAN work to finish */
1182*4882a593Smuzhiyun 	while (test_bit(__FM10K_MACVLAN_SCHED, interface->state))
1183*4882a593Smuzhiyun 		usleep_range(1000, 2000);
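	/* (editorial note) __FM10K_MACVLAN_SCHED is set while the deferred
	 * MAC/VLAN work is queued or running; polling it ensures no request
	 * is still in flight when the queue is cancelled below
	 */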
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	/* Cancel pending MAC/VLAN requests */
1186*4882a593Smuzhiyun 	fm10k_clear_macvlan_queue(interface, interface->glort, true);
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	fm10k_mbx_lock(interface);
1189*4882a593Smuzhiyun 
1190*4882a593Smuzhiyun 	/* clear the logical port state on lower device if host's mailbox is
1191*4882a593Smuzhiyun 	 * ready
1192*4882a593Smuzhiyun 	 */
1193*4882a593Smuzhiyun 	if (fm10k_host_mbx_ready(interface))
1194*4882a593Smuzhiyun 		hw->mac.ops.update_lport_state(hw, interface->glort,
1195*4882a593Smuzhiyun 					       interface->glort_count, false);
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	fm10k_mbx_unlock(interface);
1198*4882a593Smuzhiyun 
1199*4882a593Smuzhiyun 	/* reset flags to default state */
1200*4882a593Smuzhiyun 	interface->xcast_mode = FM10K_XCAST_MODE_NONE;
1201*4882a593Smuzhiyun 
1202*4882a593Smuzhiyun 	/* clear the sync flag since the lport has been dropped */
1203*4882a593Smuzhiyun 	__dev_uc_unsync(netdev, NULL);
1204*4882a593Smuzhiyun 	__dev_mc_unsync(netdev, NULL);
1205*4882a593Smuzhiyun }
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun /**
1208*4882a593Smuzhiyun  * fm10k_get_stats64 - Get System Network Statistics
1209*4882a593Smuzhiyun  * @netdev: network interface device structure
1210*4882a593Smuzhiyun  * @stats: storage space for 64bit statistics
1211*4882a593Smuzhiyun  *
1212*4882a593Smuzhiyun  * Obtain 64bit statistics in a way that is safe for both 32bit and 64bit
1213*4882a593Smuzhiyun  * architectures.
1214*4882a593Smuzhiyun  */
1215*4882a593Smuzhiyun static void fm10k_get_stats64(struct net_device *netdev,
1216*4882a593Smuzhiyun 			      struct rtnl_link_stats64 *stats)
1217*4882a593Smuzhiyun {
1218*4882a593Smuzhiyun 	struct fm10k_intfc *interface = netdev_priv(netdev);
1219*4882a593Smuzhiyun 	struct fm10k_ring *ring;
1220*4882a593Smuzhiyun 	unsigned int start, i;
1221*4882a593Smuzhiyun 	u64 bytes, packets;
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	rcu_read_lock();
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 	for (i = 0; i < interface->num_rx_queues; i++) {
1226*4882a593Smuzhiyun 		ring = READ_ONCE(interface->rx_ring[i]);
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 		if (!ring)
1229*4882a593Smuzhiyun 			continue;
1230*4882a593Smuzhiyun 
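		/* the fetch/retry pair below is a seqcount read section: if
		 * the writer updates the counters mid-read, the loop retries,
		 * yielding tear-free 64-bit values even on 32-bit kernels
		 */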
1231*4882a593Smuzhiyun 		do {
1232*4882a593Smuzhiyun 			start = u64_stats_fetch_begin_irq(&ring->syncp);
1233*4882a593Smuzhiyun 			packets = ring->stats.packets;
1234*4882a593Smuzhiyun 			bytes   = ring->stats.bytes;
1235*4882a593Smuzhiyun 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1236*4882a593Smuzhiyun 
1237*4882a593Smuzhiyun 		stats->rx_packets += packets;
1238*4882a593Smuzhiyun 		stats->rx_bytes   += bytes;
1239*4882a593Smuzhiyun 	}
1240*4882a593Smuzhiyun 
1241*4882a593Smuzhiyun 	for (i = 0; i < interface->num_tx_queues; i++) {
1242*4882a593Smuzhiyun 		ring = READ_ONCE(interface->tx_ring[i]);
1243*4882a593Smuzhiyun 
1244*4882a593Smuzhiyun 		if (!ring)
1245*4882a593Smuzhiyun 			continue;
1246*4882a593Smuzhiyun 
1247*4882a593Smuzhiyun 		do {
1248*4882a593Smuzhiyun 			start = u64_stats_fetch_begin_irq(&ring->syncp);
1249*4882a593Smuzhiyun 			packets = ring->stats.packets;
1250*4882a593Smuzhiyun 			bytes   = ring->stats.bytes;
1251*4882a593Smuzhiyun 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 		stats->tx_packets += packets;
1254*4882a593Smuzhiyun 		stats->tx_bytes   += bytes;
1255*4882a593Smuzhiyun 	}
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun 	rcu_read_unlock();
1258*4882a593Smuzhiyun 
1259*4882a593Smuzhiyun 	/* following stats updated by fm10k_service_task() */
1260*4882a593Smuzhiyun 	stats->rx_missed_errors	= netdev->stats.rx_missed_errors;
1261*4882a593Smuzhiyun }
1262*4882a593Smuzhiyun 
1263*4882a593Smuzhiyun int fm10k_setup_tc(struct net_device *dev, u8 tc)
1264*4882a593Smuzhiyun {
1265*4882a593Smuzhiyun 	struct fm10k_intfc *interface = netdev_priv(dev);
1266*4882a593Smuzhiyun 	int err;
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	/* Currently only the PF supports priority classes */
1269*4882a593Smuzhiyun 	if (tc && (interface->hw.mac.type != fm10k_mac_pf))
1270*4882a593Smuzhiyun 		return -EINVAL;
1271*4882a593Smuzhiyun 
1272*4882a593Smuzhiyun 	/* Hardware supports up to 8 traffic classes */
1273*4882a593Smuzhiyun 	if (tc > 8)
1274*4882a593Smuzhiyun 		return -EINVAL;
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 	/* Hardware has to reinitialize queues to match packet
1277*4882a593Smuzhiyun 	 * buffer alignment. Unfortunately, the hardware is not
1278*4882a593Smuzhiyun 	 * flexible enough to do this dynamically.
1279*4882a593Smuzhiyun 	 */
1280*4882a593Smuzhiyun 	if (netif_running(dev))
1281*4882a593Smuzhiyun 		fm10k_close(dev);
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	fm10k_mbx_free_irq(interface);
1284*4882a593Smuzhiyun 
1285*4882a593Smuzhiyun 	fm10k_clear_queueing_scheme(interface);
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 	/* we expect the prio_tc map to be repopulated later */
1288*4882a593Smuzhiyun 	netdev_reset_tc(dev);
1289*4882a593Smuzhiyun 	netdev_set_num_tc(dev, tc);
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun 	err = fm10k_init_queueing_scheme(interface);
1292*4882a593Smuzhiyun 	if (err)
1293*4882a593Smuzhiyun 		goto err_queueing_scheme;
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	err = fm10k_mbx_request_irq(interface);
1296*4882a593Smuzhiyun 	if (err)
1297*4882a593Smuzhiyun 		goto err_mbx_irq;
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	err = netif_running(dev) ? fm10k_open(dev) : 0;
1300*4882a593Smuzhiyun 	if (err)
1301*4882a593Smuzhiyun 		goto err_open;
1302*4882a593Smuzhiyun 
1303*4882a593Smuzhiyun 	/* flag to indicate SWPRI has yet to be updated */
1304*4882a593Smuzhiyun 	set_bit(FM10K_FLAG_SWPRI_CONFIG, interface->flags);
1305*4882a593Smuzhiyun 
1306*4882a593Smuzhiyun 	return 0;
1307*4882a593Smuzhiyun err_open:
1308*4882a593Smuzhiyun 	fm10k_mbx_free_irq(interface);
1309*4882a593Smuzhiyun err_mbx_irq:
1310*4882a593Smuzhiyun 	fm10k_clear_queueing_scheme(interface);
1311*4882a593Smuzhiyun err_queueing_scheme:
1312*4882a593Smuzhiyun 	netif_device_detach(dev);
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	return err;
1315*4882a593Smuzhiyun }
1316*4882a593Smuzhiyun 
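/* (editorial note) __fm10k_setup_tc() is the ndo_setup_tc entry point; it
 * accepts only mqprio requests and forces full TC offload. A hypothetical
 * userspace invocation (device name illustrative) would be:
 *
 *   tc qdisc add dev eth0 root mqprio num_tc 4 map 0 1 2 3 0 1 2 3 hw 1
 *
 * which reaches fm10k_setup_tc() with mqprio->num_tc == 4.
 */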
1317*4882a593Smuzhiyun static int __fm10k_setup_tc(struct net_device *dev, enum tc_setup_type type,
1318*4882a593Smuzhiyun 			    void *type_data)
1319*4882a593Smuzhiyun {
1320*4882a593Smuzhiyun 	struct tc_mqprio_qopt *mqprio = type_data;
1321*4882a593Smuzhiyun 
1322*4882a593Smuzhiyun 	if (type != TC_SETUP_QDISC_MQPRIO)
1323*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1324*4882a593Smuzhiyun 
1325*4882a593Smuzhiyun 	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
1326*4882a593Smuzhiyun 
1327*4882a593Smuzhiyun 	return fm10k_setup_tc(dev, mqprio->num_tc);
1328*4882a593Smuzhiyun }
1329*4882a593Smuzhiyun 
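/* (editorial note) publishes a new l2_accel table: rcu_assign_pointer()
 * orders table initialization before the pointer update, so Rx paths that
 * dereference ring->l2_accel under RCU never see a half-built table
 */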
1330*4882a593Smuzhiyun static void fm10k_assign_l2_accel(struct fm10k_intfc *interface,
1331*4882a593Smuzhiyun 				  struct fm10k_l2_accel *l2_accel)
1332*4882a593Smuzhiyun {
1333*4882a593Smuzhiyun 	int i;
1334*4882a593Smuzhiyun 
1335*4882a593Smuzhiyun 	for (i = 0; i < interface->num_rx_queues; i++) {
1336*4882a593Smuzhiyun 		struct fm10k_ring *ring = interface->rx_ring[i];
1337*4882a593Smuzhiyun 
1338*4882a593Smuzhiyun 		rcu_assign_pointer(ring->l2_accel, l2_accel);
1339*4882a593Smuzhiyun 	}
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun 	interface->l2_accel = l2_accel;
1342*4882a593Smuzhiyun }
1343*4882a593Smuzhiyun 
1344*4882a593Smuzhiyun static void *fm10k_dfwd_add_station(struct net_device *dev,
1345*4882a593Smuzhiyun 				    struct net_device *sdev)
1346*4882a593Smuzhiyun {
1347*4882a593Smuzhiyun 	struct fm10k_intfc *interface = netdev_priv(dev);
1348*4882a593Smuzhiyun 	struct fm10k_l2_accel *l2_accel = interface->l2_accel;
1349*4882a593Smuzhiyun 	struct fm10k_l2_accel *old_l2_accel = NULL;
1350*4882a593Smuzhiyun 	struct fm10k_dglort_cfg dglort = { 0 };
1351*4882a593Smuzhiyun 	struct fm10k_hw *hw = &interface->hw;
1352*4882a593Smuzhiyun 	int size, i;
1353*4882a593Smuzhiyun 	u16 vid, glort;
1354*4882a593Smuzhiyun 
1355*4882a593Smuzhiyun 	/* The hardware supported by fm10k filters only on the destination MAC
1356*4882a593Smuzhiyun 	 * address, so offloading is limited to macvlan modes whose filtering
1357*4882a593Smuzhiyun 	 * the hardware can actually provide.
1358*4882a593Smuzhiyun 	 */
1359*4882a593Smuzhiyun 	if (!macvlan_supports_dest_filter(sdev))
1360*4882a593Smuzhiyun 		return ERR_PTR(-EMEDIUMTYPE);
1361*4882a593Smuzhiyun 
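	/* (editorial note) the table starts at 7 station slots so the base
	 * DGLORT plus stations fit a power-of-two block of 8 GLORTs; growth
	 * to (2 * size) + 1 slots (7 -> 15 -> 31) preserves that, matching
	 * the fls(l2_accel->size)-based shared_l decoding configured below
	 */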
1362*4882a593Smuzhiyun 	/* allocate l2 accel structure if it is not available */
1363*4882a593Smuzhiyun 	if (!l2_accel) {
1364*4882a593Smuzhiyun 		/* verify there are enough free GLORTs to support l2_accel */
1365*4882a593Smuzhiyun 		if (interface->glort_count < 7)
1366*4882a593Smuzhiyun 			return ERR_PTR(-EBUSY);
1367*4882a593Smuzhiyun 
1368*4882a593Smuzhiyun 		size = offsetof(struct fm10k_l2_accel, macvlan[7]);
1369*4882a593Smuzhiyun 		l2_accel = kzalloc(size, GFP_KERNEL);
1370*4882a593Smuzhiyun 		if (!l2_accel)
1371*4882a593Smuzhiyun 			return ERR_PTR(-ENOMEM);
1372*4882a593Smuzhiyun 
1373*4882a593Smuzhiyun 		l2_accel->size = 7;
1374*4882a593Smuzhiyun 		l2_accel->dglort = interface->glort;
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 		/* update pointers */
1377*4882a593Smuzhiyun 		fm10k_assign_l2_accel(interface, l2_accel);
1378*4882a593Smuzhiyun 	/* do not expand if we are at our limit */
1379*4882a593Smuzhiyun 	} else if ((l2_accel->count == FM10K_MAX_STATIONS) ||
1380*4882a593Smuzhiyun 		   (l2_accel->count == (interface->glort_count - 1))) {
1381*4882a593Smuzhiyun 		return ERR_PTR(-EBUSY);
1382*4882a593Smuzhiyun 	/* expand if we have hit the size limit */
1383*4882a593Smuzhiyun 	} else if (l2_accel->count == l2_accel->size) {
1384*4882a593Smuzhiyun 		old_l2_accel = l2_accel;
1385*4882a593Smuzhiyun 		size = offsetof(struct fm10k_l2_accel,
1386*4882a593Smuzhiyun 				macvlan[(l2_accel->size * 2) + 1]);
1387*4882a593Smuzhiyun 		l2_accel = kzalloc(size, GFP_KERNEL);
1388*4882a593Smuzhiyun 		if (!l2_accel)
1389*4882a593Smuzhiyun 			return ERR_PTR(-ENOMEM);
1390*4882a593Smuzhiyun 
1391*4882a593Smuzhiyun 		memcpy(l2_accel, old_l2_accel,
1392*4882a593Smuzhiyun 		       offsetof(struct fm10k_l2_accel,
1393*4882a593Smuzhiyun 				macvlan[old_l2_accel->size]));
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 		l2_accel->size = (old_l2_accel->size * 2) + 1;
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun 		/* update pointers */
1398*4882a593Smuzhiyun 		fm10k_assign_l2_accel(interface, l2_accel);
1399*4882a593Smuzhiyun 		kfree_rcu(old_l2_accel, rcu);
1400*4882a593Smuzhiyun 	}
1401*4882a593Smuzhiyun 
1402*4882a593Smuzhiyun 	/* add macvlan to accel table, and record GLORT for position */
1403*4882a593Smuzhiyun 	for (i = 0; i < l2_accel->size; i++) {
1404*4882a593Smuzhiyun 		if (!l2_accel->macvlan[i])
1405*4882a593Smuzhiyun 			break;
1406*4882a593Smuzhiyun 	}
1407*4882a593Smuzhiyun 
1408*4882a593Smuzhiyun 	/* record station */
1409*4882a593Smuzhiyun 	l2_accel->macvlan[i] = sdev;
1410*4882a593Smuzhiyun 	l2_accel->count++;
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 	/* configure default DGLORT mapping for RSS/DCB */
1413*4882a593Smuzhiyun 	dglort.idx = fm10k_dglort_pf_rss;
1414*4882a593Smuzhiyun 	dglort.inner_rss = 1;
1415*4882a593Smuzhiyun 	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
1416*4882a593Smuzhiyun 	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
1417*4882a593Smuzhiyun 	dglort.glort = interface->glort;
1418*4882a593Smuzhiyun 	dglort.shared_l = fls(l2_accel->size);
1419*4882a593Smuzhiyun 	hw->mac.ops.configure_dglort_map(hw, &dglort);
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun 	/* Add rules for this specific dglort to the switch */
1422*4882a593Smuzhiyun 	fm10k_mbx_lock(interface);
1423*4882a593Smuzhiyun 
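	/* station i owns the (i + 1)th GLORT after the base, so slot 0 maps
	 * to dglort + 1 (editorial note)
	 */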
1424*4882a593Smuzhiyun 	glort = l2_accel->dglort + 1 + i;
1425*4882a593Smuzhiyun 
1426*4882a593Smuzhiyun 	if (fm10k_host_mbx_ready(interface))
1427*4882a593Smuzhiyun 		hw->mac.ops.update_xcast_mode(hw, glort,
1428*4882a593Smuzhiyun 					      FM10K_XCAST_MODE_NONE);
1429*4882a593Smuzhiyun 
1430*4882a593Smuzhiyun 	fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
1431*4882a593Smuzhiyun 				hw->mac.default_vid, true);
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	for (vid = fm10k_find_next_vlan(interface, 0);
1434*4882a593Smuzhiyun 	     vid < VLAN_N_VID;
1435*4882a593Smuzhiyun 	     vid = fm10k_find_next_vlan(interface, vid))
1436*4882a593Smuzhiyun 		fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
1437*4882a593Smuzhiyun 					vid, true);
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 	fm10k_mbx_unlock(interface);
1440*4882a593Smuzhiyun 
1441*4882a593Smuzhiyun 	return sdev;
1442*4882a593Smuzhiyun }
1443*4882a593Smuzhiyun 
1444*4882a593Smuzhiyun static void fm10k_dfwd_del_station(struct net_device *dev, void *priv)
1445*4882a593Smuzhiyun {
1446*4882a593Smuzhiyun 	struct fm10k_intfc *interface = netdev_priv(dev);
1447*4882a593Smuzhiyun 	struct fm10k_l2_accel *l2_accel = READ_ONCE(interface->l2_accel);
1448*4882a593Smuzhiyun 	struct fm10k_dglort_cfg dglort = { 0 };
1449*4882a593Smuzhiyun 	struct fm10k_hw *hw = &interface->hw;
1450*4882a593Smuzhiyun 	struct net_device *sdev = priv;
1451*4882a593Smuzhiyun 	u16 vid, glort;
1452*4882a593Smuzhiyun 	int i;
1453*4882a593Smuzhiyun 
1454*4882a593Smuzhiyun 	if (!l2_accel)
1455*4882a593Smuzhiyun 		return;
1456*4882a593Smuzhiyun 
1457*4882a593Smuzhiyun 	/* search table for matching interface */
1458*4882a593Smuzhiyun 	for (i = 0; i < l2_accel->size; i++) {
1459*4882a593Smuzhiyun 		if (l2_accel->macvlan[i] == sdev)
1460*4882a593Smuzhiyun 			break;
1461*4882a593Smuzhiyun 	}
1462*4882a593Smuzhiyun 
1463*4882a593Smuzhiyun 	/* exit if macvlan not found */
1464*4882a593Smuzhiyun 	if (i == l2_accel->size)
1465*4882a593Smuzhiyun 		return;
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun 	/* Remove any rules specific to this dglort */
1468*4882a593Smuzhiyun 	fm10k_mbx_lock(interface);
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun 	glort = l2_accel->dglort + 1 + i;
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	if (fm10k_host_mbx_ready(interface))
1473*4882a593Smuzhiyun 		hw->mac.ops.update_xcast_mode(hw, glort,
1474*4882a593Smuzhiyun 					      FM10K_XCAST_MODE_NONE);
1475*4882a593Smuzhiyun 
1476*4882a593Smuzhiyun 	fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
1477*4882a593Smuzhiyun 				hw->mac.default_vid, false);
1478*4882a593Smuzhiyun 
1479*4882a593Smuzhiyun 	for (vid = fm10k_find_next_vlan(interface, 0);
1480*4882a593Smuzhiyun 	     vid < VLAN_N_VID;
1481*4882a593Smuzhiyun 	     vid = fm10k_find_next_vlan(interface, vid))
1482*4882a593Smuzhiyun 		fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
1483*4882a593Smuzhiyun 					vid, false);
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 	fm10k_mbx_unlock(interface);
1486*4882a593Smuzhiyun 
1487*4882a593Smuzhiyun 	/* record removal */
1488*4882a593Smuzhiyun 	l2_accel->macvlan[i] = NULL;
1489*4882a593Smuzhiyun 	l2_accel->count--;
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun 	/* configure default DGLORT mapping for RSS/DCB */
1492*4882a593Smuzhiyun 	dglort.idx = fm10k_dglort_pf_rss;
1493*4882a593Smuzhiyun 	dglort.inner_rss = 1;
1494*4882a593Smuzhiyun 	dglort.rss_l = fls(interface->ring_feature[RING_F_RSS].mask);
1495*4882a593Smuzhiyun 	dglort.pc_l = fls(interface->ring_feature[RING_F_QOS].mask);
1496*4882a593Smuzhiyun 	dglort.glort = interface->glort;
1497*4882a593Smuzhiyun 	dglort.shared_l = fls(l2_accel->size);
1498*4882a593Smuzhiyun 	hw->mac.ops.configure_dglort_map(hw, &dglort);
1499*4882a593Smuzhiyun 
1500*4882a593Smuzhiyun 	/* If table is empty remove it */
1501*4882a593Smuzhiyun 	/* If the table is now empty, remove it */
1502*4882a593Smuzhiyun 		fm10k_assign_l2_accel(interface, NULL);
1503*4882a593Smuzhiyun 		kfree_rcu(l2_accel, rcu);
1504*4882a593Smuzhiyun 	}
1505*4882a593Smuzhiyun }
1506*4882a593Smuzhiyun 
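/* (editorial note) for encapsulated frames, checksum and GSO offloads are
 * kept only when fm10k_tx_encap_offload() recognizes the tunnel headers;
 * otherwise both are masked off so the stack falls back to software
 * checksumming and segmentation for that skb
 */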
1507*4882a593Smuzhiyun static netdev_features_t fm10k_features_check(struct sk_buff *skb,
1508*4882a593Smuzhiyun 					      struct net_device *dev,
1509*4882a593Smuzhiyun 					      netdev_features_t features)
1510*4882a593Smuzhiyun {
1511*4882a593Smuzhiyun 	if (!skb->encapsulation || fm10k_tx_encap_offload(skb))
1512*4882a593Smuzhiyun 		return features;
1513*4882a593Smuzhiyun 
1514*4882a593Smuzhiyun 	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
1515*4882a593Smuzhiyun }
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun static const struct net_device_ops fm10k_netdev_ops = {
1518*4882a593Smuzhiyun 	.ndo_open		= fm10k_open,
1519*4882a593Smuzhiyun 	.ndo_stop		= fm10k_close,
1520*4882a593Smuzhiyun 	.ndo_validate_addr	= eth_validate_addr,
1521*4882a593Smuzhiyun 	.ndo_start_xmit		= fm10k_xmit_frame,
1522*4882a593Smuzhiyun 	.ndo_set_mac_address	= fm10k_set_mac,
1523*4882a593Smuzhiyun 	.ndo_tx_timeout		= fm10k_tx_timeout,
1524*4882a593Smuzhiyun 	.ndo_vlan_rx_add_vid	= fm10k_vlan_rx_add_vid,
1525*4882a593Smuzhiyun 	.ndo_vlan_rx_kill_vid	= fm10k_vlan_rx_kill_vid,
1526*4882a593Smuzhiyun 	.ndo_set_rx_mode	= fm10k_set_rx_mode,
1527*4882a593Smuzhiyun 	.ndo_get_stats64	= fm10k_get_stats64,
1528*4882a593Smuzhiyun 	.ndo_setup_tc		= __fm10k_setup_tc,
1529*4882a593Smuzhiyun 	.ndo_set_vf_mac		= fm10k_ndo_set_vf_mac,
1530*4882a593Smuzhiyun 	.ndo_set_vf_vlan	= fm10k_ndo_set_vf_vlan,
1531*4882a593Smuzhiyun 	.ndo_set_vf_rate	= fm10k_ndo_set_vf_bw,
1532*4882a593Smuzhiyun 	.ndo_get_vf_config	= fm10k_ndo_get_vf_config,
1533*4882a593Smuzhiyun 	.ndo_get_vf_stats	= fm10k_ndo_get_vf_stats,
1534*4882a593Smuzhiyun 	.ndo_udp_tunnel_add	= udp_tunnel_nic_add_port,
1535*4882a593Smuzhiyun 	.ndo_udp_tunnel_del	= udp_tunnel_nic_del_port,
1536*4882a593Smuzhiyun 	.ndo_dfwd_add_station	= fm10k_dfwd_add_station,
1537*4882a593Smuzhiyun 	.ndo_dfwd_del_station	= fm10k_dfwd_del_station,
1538*4882a593Smuzhiyun 	.ndo_features_check	= fm10k_features_check,
1539*4882a593Smuzhiyun };
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun #define DEFAULT_DEBUG_LEVEL_SHIFT 3
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info)
1544*4882a593Smuzhiyun {
1545*4882a593Smuzhiyun 	netdev_features_t hw_features;
1546*4882a593Smuzhiyun 	struct fm10k_intfc *interface;
1547*4882a593Smuzhiyun 	struct net_device *dev;
1548*4882a593Smuzhiyun 
1549*4882a593Smuzhiyun 	dev = alloc_etherdev_mq(sizeof(struct fm10k_intfc), MAX_QUEUES);
1550*4882a593Smuzhiyun 	if (!dev)
1551*4882a593Smuzhiyun 		return NULL;
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 	/* set net device and ethtool ops */
1554*4882a593Smuzhiyun 	dev->netdev_ops = &fm10k_netdev_ops;
1555*4882a593Smuzhiyun 	fm10k_set_ethtool_ops(dev);
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 	/* configure default debug level */
1558*4882a593Smuzhiyun 	interface = netdev_priv(dev);
1559*4882a593Smuzhiyun 	interface->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
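	/* BIT(3) - 1 == 0x7, enabling NETIF_MSG_DRV, NETIF_MSG_PROBE and
	 * NETIF_MSG_LINK by default (editorial note)
	 */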
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun 	/* configure default features */
1562*4882a593Smuzhiyun 	dev->features |= NETIF_F_IP_CSUM |
1563*4882a593Smuzhiyun 			 NETIF_F_IPV6_CSUM |
1564*4882a593Smuzhiyun 			 NETIF_F_SG |
1565*4882a593Smuzhiyun 			 NETIF_F_TSO |
1566*4882a593Smuzhiyun 			 NETIF_F_TSO6 |
1567*4882a593Smuzhiyun 			 NETIF_F_TSO_ECN |
1568*4882a593Smuzhiyun 			 NETIF_F_RXHASH |
1569*4882a593Smuzhiyun 			 NETIF_F_RXCSUM;
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 	/* Only the PF can support VXLAN and NVGRE tunnel offloads */
1572*4882a593Smuzhiyun 	if (info->mac == fm10k_mac_pf) {
1573*4882a593Smuzhiyun 		dev->hw_enc_features = NETIF_F_IP_CSUM |
1574*4882a593Smuzhiyun 				       NETIF_F_TSO |
1575*4882a593Smuzhiyun 				       NETIF_F_TSO6 |
1576*4882a593Smuzhiyun 				       NETIF_F_TSO_ECN |
1577*4882a593Smuzhiyun 				       NETIF_F_GSO_UDP_TUNNEL |
1578*4882a593Smuzhiyun 				       NETIF_F_IPV6_CSUM |
1579*4882a593Smuzhiyun 				       NETIF_F_SG;
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 		dev->features |= NETIF_F_GSO_UDP_TUNNEL;
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun 		dev->udp_tunnel_nic_info = &fm10k_udp_tunnels;
1584*4882a593Smuzhiyun 	}
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun 	/* all features defined to this point should be changeable */
1587*4882a593Smuzhiyun 	hw_features = dev->features;
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 	/* allow user to enable L2 forwarding acceleration */
1590*4882a593Smuzhiyun 	hw_features |= NETIF_F_HW_L2FW_DOFFLOAD;
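	/* (editorial note) advertised in hw_features but not features, so L2
	 * forwarding offload defaults to off; it is toggled via ethtool's
	 * "l2-fwd-offload" feature flag (name assumed from the standard
	 * netdev feature strings), gating the ndo_dfwd_* callbacks above
	 */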
1591*4882a593Smuzhiyun 
1592*4882a593Smuzhiyun 	/* configure VLAN features */
1593*4882a593Smuzhiyun 	dev->vlan_features |= dev->features;
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun 	/* leave both of these on: VLAN tag insertion and stripping cannot be
1596*4882a593Smuzhiyun 	 * disabled on this hardware, because the VLAN tag is carried in the
1597*4882a593Smuzhiyun 	 * FTAG and not in the frame itself.
1598*4882a593Smuzhiyun 	 */
1599*4882a593Smuzhiyun 	dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
1600*4882a593Smuzhiyun 			 NETIF_F_HW_VLAN_CTAG_RX |
1601*4882a593Smuzhiyun 			 NETIF_F_HW_VLAN_CTAG_FILTER;
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun 	dev->priv_flags |= IFF_UNICAST_FLT;
1604*4882a593Smuzhiyun 
1605*4882a593Smuzhiyun 	dev->hw_features |= hw_features;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	/* MTU range: 68 - 15342 */
1608*4882a593Smuzhiyun 	dev->min_mtu = ETH_MIN_MTU;
1609*4882a593Smuzhiyun 	dev->max_mtu = FM10K_MAX_JUMBO_FRAME_SIZE;
1610*4882a593Smuzhiyun 
1611*4882a593Smuzhiyun 	return dev;
1612*4882a593Smuzhiyun }
1613