xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/emulex/benet/be_main.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2005 - 2016 Broadcom
4*4882a593Smuzhiyun  * All rights reserved.
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Contact Information:
7*4882a593Smuzhiyun  * linux-drivers@emulex.com
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * Emulex
10*4882a593Smuzhiyun  * 3333 Susan Street
11*4882a593Smuzhiyun  * Costa Mesa, CA 92626
12*4882a593Smuzhiyun  */
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #include <linux/prefetch.h>
15*4882a593Smuzhiyun #include <linux/module.h>
16*4882a593Smuzhiyun #include "be.h"
17*4882a593Smuzhiyun #include "be_cmds.h"
18*4882a593Smuzhiyun #include <asm/div64.h>
19*4882a593Smuzhiyun #include <linux/aer.h>
20*4882a593Smuzhiyun #include <linux/if_bridge.h>
21*4882a593Smuzhiyun #include <net/busy_poll.h>
22*4882a593Smuzhiyun #include <net/vxlan.h>
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun MODULE_DESCRIPTION(DRV_DESC);
25*4882a593Smuzhiyun MODULE_AUTHOR("Emulex Corporation");
26*4882a593Smuzhiyun MODULE_LICENSE("GPL");
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun /* num_vfs module param is obsolete.
29*4882a593Smuzhiyun  * Use sysfs method to enable/disable VFs.
30*4882a593Smuzhiyun  */
31*4882a593Smuzhiyun static unsigned int num_vfs;
32*4882a593Smuzhiyun module_param(num_vfs, uint, 0444);
33*4882a593Smuzhiyun MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun static ushort rx_frag_size = 2048;
36*4882a593Smuzhiyun module_param(rx_frag_size, ushort, 0444);
37*4882a593Smuzhiyun MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38*4882a593Smuzhiyun 
39*4882a593Smuzhiyun /* Per-module error detection/recovery workq shared across all functions.
40*4882a593Smuzhiyun  * Each function schedules its own work request on this shared workq.
41*4882a593Smuzhiyun  */
42*4882a593Smuzhiyun static struct workqueue_struct *be_err_recovery_workq;
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun static const struct pci_device_id be_dev_ids[] = {
45*4882a593Smuzhiyun #ifdef CONFIG_BE2NET_BE2
46*4882a593Smuzhiyun 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
47*4882a593Smuzhiyun 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
48*4882a593Smuzhiyun #endif /* CONFIG_BE2NET_BE2 */
49*4882a593Smuzhiyun #ifdef CONFIG_BE2NET_BE3
50*4882a593Smuzhiyun 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
51*4882a593Smuzhiyun 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
52*4882a593Smuzhiyun #endif /* CONFIG_BE2NET_BE3 */
53*4882a593Smuzhiyun #ifdef CONFIG_BE2NET_LANCER
54*4882a593Smuzhiyun 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
55*4882a593Smuzhiyun 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
56*4882a593Smuzhiyun #endif /* CONFIG_BE2NET_LANCER */
57*4882a593Smuzhiyun #ifdef CONFIG_BE2NET_SKYHAWK
58*4882a593Smuzhiyun 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
59*4882a593Smuzhiyun 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
60*4882a593Smuzhiyun #endif /* CONFIG_BE2NET_SKYHAWK */
61*4882a593Smuzhiyun 	{ 0 }
62*4882a593Smuzhiyun };
63*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, be_dev_ids);
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun /* Workqueue used by all functions for deferring cmd calls to the adapter */
66*4882a593Smuzhiyun static struct workqueue_struct *be_wq;
67*4882a593Smuzhiyun 
68*4882a593Smuzhiyun /* UE Status Low CSR */
69*4882a593Smuzhiyun static const char * const ue_status_low_desc[] = {
70*4882a593Smuzhiyun 	"CEV",
71*4882a593Smuzhiyun 	"CTX",
72*4882a593Smuzhiyun 	"DBUF",
73*4882a593Smuzhiyun 	"ERX",
74*4882a593Smuzhiyun 	"Host",
75*4882a593Smuzhiyun 	"MPU",
76*4882a593Smuzhiyun 	"NDMA",
77*4882a593Smuzhiyun 	"PTC ",
78*4882a593Smuzhiyun 	"RDMA ",
79*4882a593Smuzhiyun 	"RXF ",
80*4882a593Smuzhiyun 	"RXIPS ",
81*4882a593Smuzhiyun 	"RXULP0 ",
82*4882a593Smuzhiyun 	"RXULP1 ",
83*4882a593Smuzhiyun 	"RXULP2 ",
84*4882a593Smuzhiyun 	"TIM ",
85*4882a593Smuzhiyun 	"TPOST ",
86*4882a593Smuzhiyun 	"TPRE ",
87*4882a593Smuzhiyun 	"TXIPS ",
88*4882a593Smuzhiyun 	"TXULP0 ",
89*4882a593Smuzhiyun 	"TXULP1 ",
90*4882a593Smuzhiyun 	"UC ",
91*4882a593Smuzhiyun 	"WDMA ",
92*4882a593Smuzhiyun 	"TXULP2 ",
93*4882a593Smuzhiyun 	"HOST1 ",
94*4882a593Smuzhiyun 	"P0_OB_LINK ",
95*4882a593Smuzhiyun 	"P1_OB_LINK ",
96*4882a593Smuzhiyun 	"HOST_GPIO ",
97*4882a593Smuzhiyun 	"MBOX ",
98*4882a593Smuzhiyun 	"ERX2 ",
99*4882a593Smuzhiyun 	"SPARE ",
100*4882a593Smuzhiyun 	"JTAG ",
101*4882a593Smuzhiyun 	"MPU_INTPEND "
102*4882a593Smuzhiyun };
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun /* UE Status High CSR */
105*4882a593Smuzhiyun static const char * const ue_status_hi_desc[] = {
106*4882a593Smuzhiyun 	"LPCMEMHOST",
107*4882a593Smuzhiyun 	"MGMT_MAC",
108*4882a593Smuzhiyun 	"PCS0ONLINE",
109*4882a593Smuzhiyun 	"MPU_IRAM",
110*4882a593Smuzhiyun 	"PCS1ONLINE",
111*4882a593Smuzhiyun 	"PCTL0",
112*4882a593Smuzhiyun 	"PCTL1",
113*4882a593Smuzhiyun 	"PMEM",
114*4882a593Smuzhiyun 	"RR",
115*4882a593Smuzhiyun 	"TXPB",
116*4882a593Smuzhiyun 	"RXPP",
117*4882a593Smuzhiyun 	"XAUI",
118*4882a593Smuzhiyun 	"TXP",
119*4882a593Smuzhiyun 	"ARM",
120*4882a593Smuzhiyun 	"IPC",
121*4882a593Smuzhiyun 	"HOST2",
122*4882a593Smuzhiyun 	"HOST3",
123*4882a593Smuzhiyun 	"HOST4",
124*4882a593Smuzhiyun 	"HOST5",
125*4882a593Smuzhiyun 	"HOST6",
126*4882a593Smuzhiyun 	"HOST7",
127*4882a593Smuzhiyun 	"ECRC",
128*4882a593Smuzhiyun 	"Poison TLP",
129*4882a593Smuzhiyun 	"NETC",
130*4882a593Smuzhiyun 	"PERIPH",
131*4882a593Smuzhiyun 	"LLTXULP",
132*4882a593Smuzhiyun 	"D2P",
133*4882a593Smuzhiyun 	"RCON",
134*4882a593Smuzhiyun 	"LDMA",
135*4882a593Smuzhiyun 	"LLTXP",
136*4882a593Smuzhiyun 	"LLTXPB",
137*4882a593Smuzhiyun 	"Unknown"
138*4882a593Smuzhiyun };
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun #define BE_VF_IF_EN_FLAGS	(BE_IF_FLAGS_UNTAGGED | \
141*4882a593Smuzhiyun 				 BE_IF_FLAGS_BROADCAST | \
142*4882a593Smuzhiyun 				 BE_IF_FLAGS_MULTICAST | \
143*4882a593Smuzhiyun 				 BE_IF_FLAGS_PASS_L3L4_ERRORS)
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
146*4882a593Smuzhiyun {
147*4882a593Smuzhiyun 	struct be_dma_mem *mem = &q->dma_mem;
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun 	if (mem->va) {
150*4882a593Smuzhiyun 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
151*4882a593Smuzhiyun 				  mem->dma);
152*4882a593Smuzhiyun 		mem->va = NULL;
153*4882a593Smuzhiyun 	}
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
157*4882a593Smuzhiyun 			  u16 len, u16 entry_size)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun 	struct be_dma_mem *mem = &q->dma_mem;
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 	memset(q, 0, sizeof(*q));
162*4882a593Smuzhiyun 	q->len = len;
163*4882a593Smuzhiyun 	q->entry_size = entry_size;
164*4882a593Smuzhiyun 	mem->size = len * entry_size;
165*4882a593Smuzhiyun 	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
166*4882a593Smuzhiyun 				     &mem->dma, GFP_KERNEL);
167*4882a593Smuzhiyun 	if (!mem->va)
168*4882a593Smuzhiyun 		return -ENOMEM;
169*4882a593Smuzhiyun 	return 0;
170*4882a593Smuzhiyun }
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
173*4882a593Smuzhiyun {
174*4882a593Smuzhiyun 	u32 reg, enabled;
175*4882a593Smuzhiyun 
176*4882a593Smuzhiyun 	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
177*4882a593Smuzhiyun 			      &reg);
178*4882a593Smuzhiyun 	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun 	if (!enabled && enable)
181*4882a593Smuzhiyun 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
182*4882a593Smuzhiyun 	else if (enabled && !enable)
183*4882a593Smuzhiyun 		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
184*4882a593Smuzhiyun 	else
185*4882a593Smuzhiyun 		return;
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun 	pci_write_config_dword(adapter->pdev,
188*4882a593Smuzhiyun 			       PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
189*4882a593Smuzhiyun }
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun static void be_intr_set(struct be_adapter *adapter, bool enable)
192*4882a593Smuzhiyun {
193*4882a593Smuzhiyun 	int status = 0;
194*4882a593Smuzhiyun 
195*4882a593Smuzhiyun 	/* On Lancer, interrupts can't be controlled via this register */
196*4882a593Smuzhiyun 	if (lancer_chip(adapter))
197*4882a593Smuzhiyun 		return;
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 	if (be_check_error(adapter, BE_ERROR_EEH))
200*4882a593Smuzhiyun 		return;
201*4882a593Smuzhiyun 
202*4882a593Smuzhiyun 	status = be_cmd_intr_set(adapter, enable);
203*4882a593Smuzhiyun 	if (status)
204*4882a593Smuzhiyun 		be_reg_intr_set(adapter, enable);
205*4882a593Smuzhiyun }
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
208*4882a593Smuzhiyun {
209*4882a593Smuzhiyun 	u32 val = 0;
210*4882a593Smuzhiyun 
211*4882a593Smuzhiyun 	if (be_check_error(adapter, BE_ERROR_HW))
212*4882a593Smuzhiyun 		return;
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 	val |= qid & DB_RQ_RING_ID_MASK;
215*4882a593Smuzhiyun 	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
216*4882a593Smuzhiyun 
217*4882a593Smuzhiyun 	wmb();
218*4882a593Smuzhiyun 	iowrite32(val, adapter->db + DB_RQ_OFFSET);
219*4882a593Smuzhiyun }
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
222*4882a593Smuzhiyun 			  u16 posted)
223*4882a593Smuzhiyun {
224*4882a593Smuzhiyun 	u32 val = 0;
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 	if (be_check_error(adapter, BE_ERROR_HW))
227*4882a593Smuzhiyun 		return;
228*4882a593Smuzhiyun 
229*4882a593Smuzhiyun 	val |= txo->q.id & DB_TXULP_RING_ID_MASK;
230*4882a593Smuzhiyun 	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun 	wmb();
233*4882a593Smuzhiyun 	iowrite32(val, adapter->db + txo->db_offset);
234*4882a593Smuzhiyun }
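/* For both the RQ and TXQ doorbells above, the wmb() ensures the ring entries
 * written by the CPU are visible in memory before the doorbell write announces
 * the posted count; the value itself packs the ring id in the low bits and the
 * number of newly posted entries above it (see the DB_* masks/shifts).
 */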
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun static void be_eq_notify(struct be_adapter *adapter, u16 qid,
237*4882a593Smuzhiyun 			 bool arm, bool clear_int, u16 num_popped,
238*4882a593Smuzhiyun 			 u32 eq_delay_mult_enc)
239*4882a593Smuzhiyun {
240*4882a593Smuzhiyun 	u32 val = 0;
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	val |= qid & DB_EQ_RING_ID_MASK;
243*4882a593Smuzhiyun 	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
244*4882a593Smuzhiyun 
245*4882a593Smuzhiyun 	if (be_check_error(adapter, BE_ERROR_HW))
246*4882a593Smuzhiyun 		return;
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 	if (arm)
249*4882a593Smuzhiyun 		val |= 1 << DB_EQ_REARM_SHIFT;
250*4882a593Smuzhiyun 	if (clear_int)
251*4882a593Smuzhiyun 		val |= 1 << DB_EQ_CLR_SHIFT;
252*4882a593Smuzhiyun 	val |= 1 << DB_EQ_EVNT_SHIFT;
253*4882a593Smuzhiyun 	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
254*4882a593Smuzhiyun 	val |= eq_delay_mult_enc << DB_EQ_R2I_DLY_SHIFT;
255*4882a593Smuzhiyun 	iowrite32(val, adapter->db + DB_EQ_OFFSET);
256*4882a593Smuzhiyun }
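/* EQ doorbell bits used above: ring id in the low bits, single-bit re-arm and
 * clear-interrupt flags, the count of event entries the driver has consumed
 * (num_popped), and an encoded interrupt-delay multiplier passed in via
 * eq_delay_mult_enc by the driver's adaptive interrupt coalescing.
 */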
257*4882a593Smuzhiyun 
258*4882a593Smuzhiyun void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
259*4882a593Smuzhiyun {
260*4882a593Smuzhiyun 	u32 val = 0;
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun 	val |= qid & DB_CQ_RING_ID_MASK;
263*4882a593Smuzhiyun 	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
264*4882a593Smuzhiyun 			DB_CQ_RING_ID_EXT_MASK_SHIFT);
265*4882a593Smuzhiyun 
266*4882a593Smuzhiyun 	if (be_check_error(adapter, BE_ERROR_HW))
267*4882a593Smuzhiyun 		return;
268*4882a593Smuzhiyun 
269*4882a593Smuzhiyun 	if (arm)
270*4882a593Smuzhiyun 		val |= 1 << DB_CQ_REARM_SHIFT;
271*4882a593Smuzhiyun 	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
272*4882a593Smuzhiyun 	iowrite32(val, adapter->db + DB_CQ_OFFSET);
273*4882a593Smuzhiyun }
274*4882a593Smuzhiyun 
275*4882a593Smuzhiyun static int be_dev_mac_add(struct be_adapter *adapter, u8 *mac)
276*4882a593Smuzhiyun {
277*4882a593Smuzhiyun 	int i;
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun 	/* Check if mac has already been added as part of uc-list */
280*4882a593Smuzhiyun 	for (i = 0; i < adapter->uc_macs; i++) {
281*4882a593Smuzhiyun 		if (ether_addr_equal(adapter->uc_list[i].mac, mac)) {
282*4882a593Smuzhiyun 			/* mac already added, skip addition */
283*4882a593Smuzhiyun 			adapter->pmac_id[0] = adapter->pmac_id[i + 1];
284*4882a593Smuzhiyun 			return 0;
285*4882a593Smuzhiyun 		}
286*4882a593Smuzhiyun 	}
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun 	return be_cmd_pmac_add(adapter, mac, adapter->if_handle,
289*4882a593Smuzhiyun 			       &adapter->pmac_id[0], 0);
290*4882a593Smuzhiyun }
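/* Note: pmac_id[0] always tracks the filter backing netdev->dev_addr. If the
 * requested MAC is already programmed as part of the uc-list, the existing
 * pmac id is simply reused above rather than adding a duplicate entry.
 */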
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun static void be_dev_mac_del(struct be_adapter *adapter, int pmac_id)
293*4882a593Smuzhiyun {
294*4882a593Smuzhiyun 	int i;
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun 	/* Skip deletion if the programmed mac is
297*4882a593Smuzhiyun 	 * being used in uc-list
298*4882a593Smuzhiyun 	 */
299*4882a593Smuzhiyun 	for (i = 0; i < adapter->uc_macs; i++) {
300*4882a593Smuzhiyun 		if (adapter->pmac_id[i + 1] == pmac_id)
301*4882a593Smuzhiyun 			return;
302*4882a593Smuzhiyun 	}
303*4882a593Smuzhiyun 	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
304*4882a593Smuzhiyun }
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun static int be_mac_addr_set(struct net_device *netdev, void *p)
307*4882a593Smuzhiyun {
308*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
309*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
310*4882a593Smuzhiyun 	struct sockaddr *addr = p;
311*4882a593Smuzhiyun 	int status;
312*4882a593Smuzhiyun 	u8 mac[ETH_ALEN];
313*4882a593Smuzhiyun 	u32 old_pmac_id = adapter->pmac_id[0];
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun 	if (!is_valid_ether_addr(addr->sa_data))
316*4882a593Smuzhiyun 		return -EADDRNOTAVAIL;
317*4882a593Smuzhiyun 
318*4882a593Smuzhiyun 	/* Proceed further only if the user-provided MAC is different
319*4882a593Smuzhiyun 	 * from the active MAC
320*4882a593Smuzhiyun 	 */
321*4882a593Smuzhiyun 	if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
322*4882a593Smuzhiyun 		return 0;
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 	/* BE3 VFs without FILTMGMT privilege are not allowed to set their MAC
325*4882a593Smuzhiyun 	 * address
326*4882a593Smuzhiyun 	 */
327*4882a593Smuzhiyun 	if (BEx_chip(adapter) && be_virtfn(adapter) &&
328*4882a593Smuzhiyun 	    !check_privilege(adapter, BE_PRIV_FILTMGMT))
329*4882a593Smuzhiyun 		return -EPERM;
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun 	/* if device is not running, copy MAC to netdev->dev_addr */
332*4882a593Smuzhiyun 	if (!netif_running(netdev))
333*4882a593Smuzhiyun 		goto done;
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 	/* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
336*4882a593Smuzhiyun 	 * privilege or if PF did not provision the new MAC address.
337*4882a593Smuzhiyun 	 * On BE3, this cmd will always fail if the VF doesn't have the
338*4882a593Smuzhiyun 	 * FILTMGMT privilege. This failure is OK, only if the PF programmed
339*4882a593Smuzhiyun 	 * the MAC for the VF.
340*4882a593Smuzhiyun 	 */
341*4882a593Smuzhiyun 	mutex_lock(&adapter->rx_filter_lock);
342*4882a593Smuzhiyun 	status = be_dev_mac_add(adapter, (u8 *)addr->sa_data);
343*4882a593Smuzhiyun 	if (!status) {
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun 		/* Delete the old programmed MAC. This call may fail if the
346*4882a593Smuzhiyun 		 * old MAC was already deleted by the PF driver.
347*4882a593Smuzhiyun 		 */
348*4882a593Smuzhiyun 		if (adapter->pmac_id[0] != old_pmac_id)
349*4882a593Smuzhiyun 			be_dev_mac_del(adapter, old_pmac_id);
350*4882a593Smuzhiyun 	}
351*4882a593Smuzhiyun 
352*4882a593Smuzhiyun 	mutex_unlock(&adapter->rx_filter_lock);
353*4882a593Smuzhiyun 	/* Decide if the new MAC is successfully activated only after
354*4882a593Smuzhiyun 	 * querying the FW
355*4882a593Smuzhiyun 	 */
356*4882a593Smuzhiyun 	status = be_cmd_get_active_mac(adapter, adapter->pmac_id[0], mac,
357*4882a593Smuzhiyun 				       adapter->if_handle, true, 0);
358*4882a593Smuzhiyun 	if (status)
359*4882a593Smuzhiyun 		goto err;
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun 	/* The MAC change did not happen, either due to lack of privilege
362*4882a593Smuzhiyun 	 * or PF didn't pre-provision.
363*4882a593Smuzhiyun 	 */
364*4882a593Smuzhiyun 	if (!ether_addr_equal(addr->sa_data, mac)) {
365*4882a593Smuzhiyun 		status = -EPERM;
366*4882a593Smuzhiyun 		goto err;
367*4882a593Smuzhiyun 	}
368*4882a593Smuzhiyun 
369*4882a593Smuzhiyun 	/* Remember currently programmed MAC */
370*4882a593Smuzhiyun 	ether_addr_copy(adapter->dev_mac, addr->sa_data);
371*4882a593Smuzhiyun done:
372*4882a593Smuzhiyun 	ether_addr_copy(netdev->dev_addr, addr->sa_data);
373*4882a593Smuzhiyun 	dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
374*4882a593Smuzhiyun 	return 0;
375*4882a593Smuzhiyun err:
376*4882a593Smuzhiyun 	dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
377*4882a593Smuzhiyun 	return status;
378*4882a593Smuzhiyun }
379*4882a593Smuzhiyun 
380*4882a593Smuzhiyun /* BE2 supports only v0 cmd */
381*4882a593Smuzhiyun static void *hw_stats_from_cmd(struct be_adapter *adapter)
382*4882a593Smuzhiyun {
383*4882a593Smuzhiyun 	if (BE2_chip(adapter)) {
384*4882a593Smuzhiyun 		struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
385*4882a593Smuzhiyun 
386*4882a593Smuzhiyun 		return &cmd->hw_stats;
387*4882a593Smuzhiyun 	} else if (BE3_chip(adapter)) {
388*4882a593Smuzhiyun 		struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
389*4882a593Smuzhiyun 
390*4882a593Smuzhiyun 		return &cmd->hw_stats;
391*4882a593Smuzhiyun 	} else {
392*4882a593Smuzhiyun 		struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
393*4882a593Smuzhiyun 
394*4882a593Smuzhiyun 		return &cmd->hw_stats;
395*4882a593Smuzhiyun 	}
396*4882a593Smuzhiyun }
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun /* BE2 supports only v0 cmd */
399*4882a593Smuzhiyun static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
400*4882a593Smuzhiyun {
401*4882a593Smuzhiyun 	if (BE2_chip(adapter)) {
402*4882a593Smuzhiyun 		struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 		return &hw_stats->erx;
405*4882a593Smuzhiyun 	} else if (BE3_chip(adapter)) {
406*4882a593Smuzhiyun 		struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 		return &hw_stats->erx;
409*4882a593Smuzhiyun 	} else {
410*4882a593Smuzhiyun 		struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun 		return &hw_stats->erx;
413*4882a593Smuzhiyun 	}
414*4882a593Smuzhiyun }
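/* The two helpers above only select the response layout matching the chip:
 * BE2 parses the v0 GET_STATS response, BE3 the v1 version and later chips
 * the v2 version (Lancer uses a separate pport stats command instead).
 * Callers cast the returned pointer to the corresponding _vN structure.
 */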
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun static void populate_be_v0_stats(struct be_adapter *adapter)
417*4882a593Smuzhiyun {
418*4882a593Smuzhiyun 	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
419*4882a593Smuzhiyun 	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
420*4882a593Smuzhiyun 	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
421*4882a593Smuzhiyun 	struct be_port_rxf_stats_v0 *port_stats =
422*4882a593Smuzhiyun 					&rxf_stats->port[adapter->port_num];
423*4882a593Smuzhiyun 	struct be_drv_stats *drvs = &adapter->drv_stats;
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
426*4882a593Smuzhiyun 	drvs->rx_pause_frames = port_stats->rx_pause_frames;
427*4882a593Smuzhiyun 	drvs->rx_crc_errors = port_stats->rx_crc_errors;
428*4882a593Smuzhiyun 	drvs->rx_control_frames = port_stats->rx_control_frames;
429*4882a593Smuzhiyun 	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
430*4882a593Smuzhiyun 	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
431*4882a593Smuzhiyun 	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
432*4882a593Smuzhiyun 	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
433*4882a593Smuzhiyun 	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
434*4882a593Smuzhiyun 	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
435*4882a593Smuzhiyun 	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
436*4882a593Smuzhiyun 	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
437*4882a593Smuzhiyun 	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
438*4882a593Smuzhiyun 	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
439*4882a593Smuzhiyun 	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
440*4882a593Smuzhiyun 	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
441*4882a593Smuzhiyun 	drvs->rx_dropped_header_too_small =
442*4882a593Smuzhiyun 		port_stats->rx_dropped_header_too_small;
443*4882a593Smuzhiyun 	drvs->rx_address_filtered =
444*4882a593Smuzhiyun 					port_stats->rx_address_filtered +
445*4882a593Smuzhiyun 					port_stats->rx_vlan_filtered;
446*4882a593Smuzhiyun 	drvs->rx_alignment_symbol_errors =
447*4882a593Smuzhiyun 		port_stats->rx_alignment_symbol_errors;
448*4882a593Smuzhiyun 
449*4882a593Smuzhiyun 	drvs->tx_pauseframes = port_stats->tx_pauseframes;
450*4882a593Smuzhiyun 	drvs->tx_controlframes = port_stats->tx_controlframes;
451*4882a593Smuzhiyun 
452*4882a593Smuzhiyun 	if (adapter->port_num)
453*4882a593Smuzhiyun 		drvs->jabber_events = rxf_stats->port1_jabber_events;
454*4882a593Smuzhiyun 	else
455*4882a593Smuzhiyun 		drvs->jabber_events = rxf_stats->port0_jabber_events;
456*4882a593Smuzhiyun 	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
457*4882a593Smuzhiyun 	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
458*4882a593Smuzhiyun 	drvs->forwarded_packets = rxf_stats->forwarded_packets;
459*4882a593Smuzhiyun 	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
460*4882a593Smuzhiyun 	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
461*4882a593Smuzhiyun 	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
462*4882a593Smuzhiyun 	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
463*4882a593Smuzhiyun }
464*4882a593Smuzhiyun 
465*4882a593Smuzhiyun static void populate_be_v1_stats(struct be_adapter *adapter)
466*4882a593Smuzhiyun {
467*4882a593Smuzhiyun 	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
468*4882a593Smuzhiyun 	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
469*4882a593Smuzhiyun 	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
470*4882a593Smuzhiyun 	struct be_port_rxf_stats_v1 *port_stats =
471*4882a593Smuzhiyun 					&rxf_stats->port[adapter->port_num];
472*4882a593Smuzhiyun 	struct be_drv_stats *drvs = &adapter->drv_stats;
473*4882a593Smuzhiyun 
474*4882a593Smuzhiyun 	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
475*4882a593Smuzhiyun 	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
476*4882a593Smuzhiyun 	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
477*4882a593Smuzhiyun 	drvs->rx_pause_frames = port_stats->rx_pause_frames;
478*4882a593Smuzhiyun 	drvs->rx_crc_errors = port_stats->rx_crc_errors;
479*4882a593Smuzhiyun 	drvs->rx_control_frames = port_stats->rx_control_frames;
480*4882a593Smuzhiyun 	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
481*4882a593Smuzhiyun 	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
482*4882a593Smuzhiyun 	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
483*4882a593Smuzhiyun 	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
484*4882a593Smuzhiyun 	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
485*4882a593Smuzhiyun 	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
486*4882a593Smuzhiyun 	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
487*4882a593Smuzhiyun 	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
488*4882a593Smuzhiyun 	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
489*4882a593Smuzhiyun 	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
490*4882a593Smuzhiyun 	drvs->rx_dropped_header_too_small =
491*4882a593Smuzhiyun 		port_stats->rx_dropped_header_too_small;
492*4882a593Smuzhiyun 	drvs->rx_input_fifo_overflow_drop =
493*4882a593Smuzhiyun 		port_stats->rx_input_fifo_overflow_drop;
494*4882a593Smuzhiyun 	drvs->rx_address_filtered = port_stats->rx_address_filtered;
495*4882a593Smuzhiyun 	drvs->rx_alignment_symbol_errors =
496*4882a593Smuzhiyun 		port_stats->rx_alignment_symbol_errors;
497*4882a593Smuzhiyun 	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
498*4882a593Smuzhiyun 	drvs->tx_pauseframes = port_stats->tx_pauseframes;
499*4882a593Smuzhiyun 	drvs->tx_controlframes = port_stats->tx_controlframes;
500*4882a593Smuzhiyun 	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
501*4882a593Smuzhiyun 	drvs->jabber_events = port_stats->jabber_events;
502*4882a593Smuzhiyun 	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
503*4882a593Smuzhiyun 	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
504*4882a593Smuzhiyun 	drvs->forwarded_packets = rxf_stats->forwarded_packets;
505*4882a593Smuzhiyun 	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
506*4882a593Smuzhiyun 	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
507*4882a593Smuzhiyun 	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
508*4882a593Smuzhiyun 	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
509*4882a593Smuzhiyun }
510*4882a593Smuzhiyun 
511*4882a593Smuzhiyun static void populate_be_v2_stats(struct be_adapter *adapter)
512*4882a593Smuzhiyun {
513*4882a593Smuzhiyun 	struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
514*4882a593Smuzhiyun 	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
515*4882a593Smuzhiyun 	struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
516*4882a593Smuzhiyun 	struct be_port_rxf_stats_v2 *port_stats =
517*4882a593Smuzhiyun 					&rxf_stats->port[adapter->port_num];
518*4882a593Smuzhiyun 	struct be_drv_stats *drvs = &adapter->drv_stats;
519*4882a593Smuzhiyun 
520*4882a593Smuzhiyun 	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
521*4882a593Smuzhiyun 	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
522*4882a593Smuzhiyun 	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
523*4882a593Smuzhiyun 	drvs->rx_pause_frames = port_stats->rx_pause_frames;
524*4882a593Smuzhiyun 	drvs->rx_crc_errors = port_stats->rx_crc_errors;
525*4882a593Smuzhiyun 	drvs->rx_control_frames = port_stats->rx_control_frames;
526*4882a593Smuzhiyun 	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
527*4882a593Smuzhiyun 	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
528*4882a593Smuzhiyun 	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
529*4882a593Smuzhiyun 	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
530*4882a593Smuzhiyun 	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
531*4882a593Smuzhiyun 	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
532*4882a593Smuzhiyun 	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
533*4882a593Smuzhiyun 	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
534*4882a593Smuzhiyun 	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
535*4882a593Smuzhiyun 	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
536*4882a593Smuzhiyun 	drvs->rx_dropped_header_too_small =
537*4882a593Smuzhiyun 		port_stats->rx_dropped_header_too_small;
538*4882a593Smuzhiyun 	drvs->rx_input_fifo_overflow_drop =
539*4882a593Smuzhiyun 		port_stats->rx_input_fifo_overflow_drop;
540*4882a593Smuzhiyun 	drvs->rx_address_filtered = port_stats->rx_address_filtered;
541*4882a593Smuzhiyun 	drvs->rx_alignment_symbol_errors =
542*4882a593Smuzhiyun 		port_stats->rx_alignment_symbol_errors;
543*4882a593Smuzhiyun 	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
544*4882a593Smuzhiyun 	drvs->tx_pauseframes = port_stats->tx_pauseframes;
545*4882a593Smuzhiyun 	drvs->tx_controlframes = port_stats->tx_controlframes;
546*4882a593Smuzhiyun 	drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
547*4882a593Smuzhiyun 	drvs->jabber_events = port_stats->jabber_events;
548*4882a593Smuzhiyun 	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
549*4882a593Smuzhiyun 	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
550*4882a593Smuzhiyun 	drvs->forwarded_packets = rxf_stats->forwarded_packets;
551*4882a593Smuzhiyun 	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
552*4882a593Smuzhiyun 	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
553*4882a593Smuzhiyun 	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
554*4882a593Smuzhiyun 	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
555*4882a593Smuzhiyun 	if (be_roce_supported(adapter)) {
556*4882a593Smuzhiyun 		drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
557*4882a593Smuzhiyun 		drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
558*4882a593Smuzhiyun 		drvs->rx_roce_frames = port_stats->roce_frames_received;
559*4882a593Smuzhiyun 		drvs->roce_drops_crc = port_stats->roce_drops_crc;
560*4882a593Smuzhiyun 		drvs->roce_drops_payload_len =
561*4882a593Smuzhiyun 			port_stats->roce_drops_payload_len;
562*4882a593Smuzhiyun 	}
563*4882a593Smuzhiyun }
564*4882a593Smuzhiyun 
565*4882a593Smuzhiyun static void populate_lancer_stats(struct be_adapter *adapter)
566*4882a593Smuzhiyun {
567*4882a593Smuzhiyun 	struct be_drv_stats *drvs = &adapter->drv_stats;
568*4882a593Smuzhiyun 	struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun 	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
571*4882a593Smuzhiyun 	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
572*4882a593Smuzhiyun 	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
573*4882a593Smuzhiyun 	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
574*4882a593Smuzhiyun 	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
575*4882a593Smuzhiyun 	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
576*4882a593Smuzhiyun 	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
577*4882a593Smuzhiyun 	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
578*4882a593Smuzhiyun 	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
579*4882a593Smuzhiyun 	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
580*4882a593Smuzhiyun 	drvs->rx_dropped_tcp_length =
581*4882a593Smuzhiyun 				pport_stats->rx_dropped_invalid_tcp_length;
582*4882a593Smuzhiyun 	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
583*4882a593Smuzhiyun 	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
584*4882a593Smuzhiyun 	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
585*4882a593Smuzhiyun 	drvs->rx_dropped_header_too_small =
586*4882a593Smuzhiyun 				pport_stats->rx_dropped_header_too_small;
587*4882a593Smuzhiyun 	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
588*4882a593Smuzhiyun 	drvs->rx_address_filtered =
589*4882a593Smuzhiyun 					pport_stats->rx_address_filtered +
590*4882a593Smuzhiyun 					pport_stats->rx_vlan_filtered;
591*4882a593Smuzhiyun 	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
592*4882a593Smuzhiyun 	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
593*4882a593Smuzhiyun 	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
594*4882a593Smuzhiyun 	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
595*4882a593Smuzhiyun 	drvs->jabber_events = pport_stats->rx_jabbers;
596*4882a593Smuzhiyun 	drvs->forwarded_packets = pport_stats->num_forwards_lo;
597*4882a593Smuzhiyun 	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
598*4882a593Smuzhiyun 	drvs->rx_drops_too_many_frags =
599*4882a593Smuzhiyun 				pport_stats->rx_drops_too_many_frags_lo;
600*4882a593Smuzhiyun }
601*4882a593Smuzhiyun 
602*4882a593Smuzhiyun static void accumulate_16bit_val(u32 *acc, u16 val)
603*4882a593Smuzhiyun {
604*4882a593Smuzhiyun #define lo(x)			(x & 0xFFFF)
605*4882a593Smuzhiyun #define hi(x)			(x & 0xFFFF0000)
606*4882a593Smuzhiyun 	bool wrapped = val < lo(*acc);
607*4882a593Smuzhiyun 	u32 newacc = hi(*acc) + val;
608*4882a593Smuzhiyun 
609*4882a593Smuzhiyun 	if (wrapped)
610*4882a593Smuzhiyun 		newacc += 65536;
611*4882a593Smuzhiyun 	WRITE_ONCE(*acc, newacc);
612*4882a593Smuzhiyun }
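/* Worked example: if *acc is 0x0001fff0 (high word 0x0001, low word 0xfff0)
 * and the new 16-bit HW reading is 0x0005, the reading is below the stored
 * low word, so the counter must have wrapped; newacc becomes
 * 0x00010000 + 0x0005 + 65536 = 0x00020005, preserving the running total.
 */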
613*4882a593Smuzhiyun 
614*4882a593Smuzhiyun static void populate_erx_stats(struct be_adapter *adapter,
615*4882a593Smuzhiyun 			       struct be_rx_obj *rxo, u32 erx_stat)
616*4882a593Smuzhiyun {
617*4882a593Smuzhiyun 	if (!BEx_chip(adapter))
618*4882a593Smuzhiyun 		rx_stats(rxo)->rx_drops_no_frags = erx_stat;
619*4882a593Smuzhiyun 	else
620*4882a593Smuzhiyun 		/* The erx HW counter below can wrap around after
621*4882a593Smuzhiyun 		 * 65535, so the driver accumulates it into a 32-bit value
622*4882a593Smuzhiyun 		 */
623*4882a593Smuzhiyun 		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
624*4882a593Smuzhiyun 				     (u16)erx_stat);
625*4882a593Smuzhiyun }
626*4882a593Smuzhiyun 
627*4882a593Smuzhiyun void be_parse_stats(struct be_adapter *adapter)
628*4882a593Smuzhiyun {
629*4882a593Smuzhiyun 	struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
630*4882a593Smuzhiyun 	struct be_rx_obj *rxo;
631*4882a593Smuzhiyun 	int i;
632*4882a593Smuzhiyun 	u32 erx_stat;
633*4882a593Smuzhiyun 
634*4882a593Smuzhiyun 	if (lancer_chip(adapter)) {
635*4882a593Smuzhiyun 		populate_lancer_stats(adapter);
636*4882a593Smuzhiyun 	} else {
637*4882a593Smuzhiyun 		if (BE2_chip(adapter))
638*4882a593Smuzhiyun 			populate_be_v0_stats(adapter);
639*4882a593Smuzhiyun 		else if (BE3_chip(adapter))
640*4882a593Smuzhiyun 			/* for BE3 */
641*4882a593Smuzhiyun 			populate_be_v1_stats(adapter);
642*4882a593Smuzhiyun 		else
643*4882a593Smuzhiyun 			populate_be_v2_stats(adapter);
644*4882a593Smuzhiyun 
645*4882a593Smuzhiyun 		/* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
646*4882a593Smuzhiyun 		for_all_rx_queues(adapter, rxo, i) {
647*4882a593Smuzhiyun 			erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
648*4882a593Smuzhiyun 			populate_erx_stats(adapter, rxo, erx_stat);
649*4882a593Smuzhiyun 		}
650*4882a593Smuzhiyun 	}
651*4882a593Smuzhiyun }
652*4882a593Smuzhiyun 
653*4882a593Smuzhiyun static void be_get_stats64(struct net_device *netdev,
654*4882a593Smuzhiyun 			   struct rtnl_link_stats64 *stats)
655*4882a593Smuzhiyun {
656*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
657*4882a593Smuzhiyun 	struct be_drv_stats *drvs = &adapter->drv_stats;
658*4882a593Smuzhiyun 	struct be_rx_obj *rxo;
659*4882a593Smuzhiyun 	struct be_tx_obj *txo;
660*4882a593Smuzhiyun 	u64 pkts, bytes;
661*4882a593Smuzhiyun 	unsigned int start;
662*4882a593Smuzhiyun 	int i;
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 	for_all_rx_queues(adapter, rxo, i) {
665*4882a593Smuzhiyun 		const struct be_rx_stats *rx_stats = rx_stats(rxo);
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun 		do {
668*4882a593Smuzhiyun 			start = u64_stats_fetch_begin_irq(&rx_stats->sync);
669*4882a593Smuzhiyun 			pkts = rx_stats(rxo)->rx_pkts;
670*4882a593Smuzhiyun 			bytes = rx_stats(rxo)->rx_bytes;
671*4882a593Smuzhiyun 		} while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
672*4882a593Smuzhiyun 		stats->rx_packets += pkts;
673*4882a593Smuzhiyun 		stats->rx_bytes += bytes;
674*4882a593Smuzhiyun 		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
675*4882a593Smuzhiyun 		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
676*4882a593Smuzhiyun 					rx_stats(rxo)->rx_drops_no_frags;
677*4882a593Smuzhiyun 	}
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun 	for_all_tx_queues(adapter, txo, i) {
680*4882a593Smuzhiyun 		const struct be_tx_stats *tx_stats = tx_stats(txo);
681*4882a593Smuzhiyun 
682*4882a593Smuzhiyun 		do {
683*4882a593Smuzhiyun 			start = u64_stats_fetch_begin_irq(&tx_stats->sync);
684*4882a593Smuzhiyun 			pkts = tx_stats(txo)->tx_pkts;
685*4882a593Smuzhiyun 			bytes = tx_stats(txo)->tx_bytes;
686*4882a593Smuzhiyun 		} while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
687*4882a593Smuzhiyun 		stats->tx_packets += pkts;
688*4882a593Smuzhiyun 		stats->tx_bytes += bytes;
689*4882a593Smuzhiyun 	}
690*4882a593Smuzhiyun 
691*4882a593Smuzhiyun 	/* bad pkts received */
692*4882a593Smuzhiyun 	stats->rx_errors = drvs->rx_crc_errors +
693*4882a593Smuzhiyun 		drvs->rx_alignment_symbol_errors +
694*4882a593Smuzhiyun 		drvs->rx_in_range_errors +
695*4882a593Smuzhiyun 		drvs->rx_out_range_errors +
696*4882a593Smuzhiyun 		drvs->rx_frame_too_long +
697*4882a593Smuzhiyun 		drvs->rx_dropped_too_small +
698*4882a593Smuzhiyun 		drvs->rx_dropped_too_short +
699*4882a593Smuzhiyun 		drvs->rx_dropped_header_too_small +
700*4882a593Smuzhiyun 		drvs->rx_dropped_tcp_length +
701*4882a593Smuzhiyun 		drvs->rx_dropped_runt;
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 	/* detailed rx errors */
704*4882a593Smuzhiyun 	stats->rx_length_errors = drvs->rx_in_range_errors +
705*4882a593Smuzhiyun 		drvs->rx_out_range_errors +
706*4882a593Smuzhiyun 		drvs->rx_frame_too_long;
707*4882a593Smuzhiyun 
708*4882a593Smuzhiyun 	stats->rx_crc_errors = drvs->rx_crc_errors;
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun 	/* frame alignment errors */
711*4882a593Smuzhiyun 	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
712*4882a593Smuzhiyun 
713*4882a593Smuzhiyun 	/* receiver fifo overrun */
714*4882a593Smuzhiyun 	/* drops_no_pbuf is not per i/f, it's per BE card */
715*4882a593Smuzhiyun 	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
716*4882a593Smuzhiyun 				drvs->rx_input_fifo_overflow_drop +
717*4882a593Smuzhiyun 				drvs->rx_drops_no_pbuf;
718*4882a593Smuzhiyun }
719*4882a593Smuzhiyun 
720*4882a593Smuzhiyun void be_link_status_update(struct be_adapter *adapter, u8 link_status)
721*4882a593Smuzhiyun {
722*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
725*4882a593Smuzhiyun 		netif_carrier_off(netdev);
726*4882a593Smuzhiyun 		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
727*4882a593Smuzhiyun 	}
728*4882a593Smuzhiyun 
729*4882a593Smuzhiyun 	if (link_status)
730*4882a593Smuzhiyun 		netif_carrier_on(netdev);
731*4882a593Smuzhiyun 	else
732*4882a593Smuzhiyun 		netif_carrier_off(netdev);
733*4882a593Smuzhiyun 
734*4882a593Smuzhiyun 	netdev_info(netdev, "Link is %s\n", link_status ? "Up" : "Down");
735*4882a593Smuzhiyun }
736*4882a593Smuzhiyun 
737*4882a593Smuzhiyun static int be_gso_hdr_len(struct sk_buff *skb)
738*4882a593Smuzhiyun {
739*4882a593Smuzhiyun 	if (skb->encapsulation)
740*4882a593Smuzhiyun 		return skb_inner_transport_offset(skb) +
741*4882a593Smuzhiyun 		       inner_tcp_hdrlen(skb);
742*4882a593Smuzhiyun 	return skb_transport_offset(skb) + tcp_hdrlen(skb);
743*4882a593Smuzhiyun }
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun static void be_tx_stats_update(struct be_tx_obj *txo, struct sk_buff *skb)
746*4882a593Smuzhiyun {
747*4882a593Smuzhiyun 	struct be_tx_stats *stats = tx_stats(txo);
748*4882a593Smuzhiyun 	u32 tx_pkts = skb_shinfo(skb)->gso_segs ? : 1;
749*4882a593Smuzhiyun 	/* Account for headers which get duplicated in TSO pkt */
750*4882a593Smuzhiyun 	u32 dup_hdr_len = tx_pkts > 1 ? be_gso_hdr_len(skb) * (tx_pkts - 1) : 0;
751*4882a593Smuzhiyun 
752*4882a593Smuzhiyun 	u64_stats_update_begin(&stats->sync);
753*4882a593Smuzhiyun 	stats->tx_reqs++;
754*4882a593Smuzhiyun 	stats->tx_bytes += skb->len + dup_hdr_len;
755*4882a593Smuzhiyun 	stats->tx_pkts += tx_pkts;
756*4882a593Smuzhiyun 	if (skb->encapsulation && skb->ip_summed == CHECKSUM_PARTIAL)
757*4882a593Smuzhiyun 		stats->tx_vxlan_offload_pkts += tx_pkts;
758*4882a593Smuzhiyun 	u64_stats_update_end(&stats->sync);
759*4882a593Smuzhiyun }
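/* Accounting example: a TSO skb split into 10 segments with a 66-byte
 * L2..L4 header (be_gso_hdr_len()) counts as 10 tx_pkts, and 9 * 66 bytes of
 * replicated headers are added on top of skb->len, since every segment on the
 * wire carries its own copy of the headers.
 */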
760*4882a593Smuzhiyun 
761*4882a593Smuzhiyun /* Returns number of WRBs needed for the skb */
762*4882a593Smuzhiyun static u32 skb_wrb_cnt(struct sk_buff *skb)
763*4882a593Smuzhiyun {
764*4882a593Smuzhiyun 	/* +1 for the header wrb */
765*4882a593Smuzhiyun 	return 1 + (skb_headlen(skb) ? 1 : 0) + skb_shinfo(skb)->nr_frags;
766*4882a593Smuzhiyun }
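/* Example: an skb with linear header data and 3 page fragments needs
 * 1 (header WRB) + 1 (linear buffer) + 3 (frags) = 5 WRBs.
 */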
767*4882a593Smuzhiyun 
768*4882a593Smuzhiyun static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
769*4882a593Smuzhiyun {
770*4882a593Smuzhiyun 	wrb->frag_pa_hi = cpu_to_le32(upper_32_bits(addr));
771*4882a593Smuzhiyun 	wrb->frag_pa_lo = cpu_to_le32(lower_32_bits(addr));
772*4882a593Smuzhiyun 	wrb->frag_len = cpu_to_le32(len & ETH_WRB_FRAG_LEN_MASK);
773*4882a593Smuzhiyun 	wrb->rsvd0 = 0;
774*4882a593Smuzhiyun }
775*4882a593Smuzhiyun 
776*4882a593Smuzhiyun /* A dummy wrb is just all zeros. Using a separate routine for dummy-wrb
777*4882a593Smuzhiyun  * to avoid the swap and shift/mask operations in wrb_fill().
778*4882a593Smuzhiyun  */
779*4882a593Smuzhiyun static inline void wrb_fill_dummy(struct be_eth_wrb *wrb)
780*4882a593Smuzhiyun {
781*4882a593Smuzhiyun 	wrb->frag_pa_hi = 0;
782*4882a593Smuzhiyun 	wrb->frag_pa_lo = 0;
783*4882a593Smuzhiyun 	wrb->frag_len = 0;
784*4882a593Smuzhiyun 	wrb->rsvd0 = 0;
785*4882a593Smuzhiyun }
786*4882a593Smuzhiyun 
787*4882a593Smuzhiyun static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
788*4882a593Smuzhiyun 				     struct sk_buff *skb)
789*4882a593Smuzhiyun {
790*4882a593Smuzhiyun 	u8 vlan_prio;
791*4882a593Smuzhiyun 	u16 vlan_tag;
792*4882a593Smuzhiyun 
793*4882a593Smuzhiyun 	vlan_tag = skb_vlan_tag_get(skb);
794*4882a593Smuzhiyun 	vlan_prio = skb_vlan_tag_get_prio(skb);
795*4882a593Smuzhiyun 	/* If vlan priority provided by OS is NOT in available bmap */
796*4882a593Smuzhiyun 	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
797*4882a593Smuzhiyun 		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
798*4882a593Smuzhiyun 				adapter->recommended_prio_bits;
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun 	return vlan_tag;
801*4882a593Smuzhiyun }
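/* Example: if the stack hands down a tag with priority 5 but bit 5 is clear in
 * adapter->vlan_prio_bmap, the PCP bits are replaced with
 * adapter->recommended_prio_bits (stored pre-shifted into the PCP field, as
 * the OR above implies) while the VLAN id itself is left untouched.
 */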
802*4882a593Smuzhiyun 
803*4882a593Smuzhiyun /* Used only for IP tunnel packets */
804*4882a593Smuzhiyun static u16 skb_inner_ip_proto(struct sk_buff *skb)
805*4882a593Smuzhiyun {
806*4882a593Smuzhiyun 	return (inner_ip_hdr(skb)->version == 4) ?
807*4882a593Smuzhiyun 		inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
808*4882a593Smuzhiyun }
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun static u16 skb_ip_proto(struct sk_buff *skb)
811*4882a593Smuzhiyun {
812*4882a593Smuzhiyun 	return (ip_hdr(skb)->version == 4) ?
813*4882a593Smuzhiyun 		ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
814*4882a593Smuzhiyun }
815*4882a593Smuzhiyun 
816*4882a593Smuzhiyun static inline bool be_is_txq_full(struct be_tx_obj *txo)
817*4882a593Smuzhiyun {
818*4882a593Smuzhiyun 	return atomic_read(&txo->q.used) + BE_MAX_TX_FRAG_COUNT >= txo->q.len;
819*4882a593Smuzhiyun }
820*4882a593Smuzhiyun 
821*4882a593Smuzhiyun static inline bool be_can_txq_wake(struct be_tx_obj *txo)
822*4882a593Smuzhiyun {
823*4882a593Smuzhiyun 	return atomic_read(&txo->q.used) < txo->q.len / 2;
824*4882a593Smuzhiyun }
825*4882a593Smuzhiyun 
826*4882a593Smuzhiyun static inline bool be_is_tx_compl_pending(struct be_tx_obj *txo)
827*4882a593Smuzhiyun {
828*4882a593Smuzhiyun 	return atomic_read(&txo->q.used) > txo->pend_wrb_cnt;
829*4882a593Smuzhiyun }
830*4882a593Smuzhiyun 
831*4882a593Smuzhiyun static void be_get_wrb_params_from_skb(struct be_adapter *adapter,
832*4882a593Smuzhiyun 				       struct sk_buff *skb,
833*4882a593Smuzhiyun 				       struct be_wrb_params *wrb_params)
834*4882a593Smuzhiyun {
835*4882a593Smuzhiyun 	u16 proto;
836*4882a593Smuzhiyun 
837*4882a593Smuzhiyun 	if (skb_is_gso(skb)) {
838*4882a593Smuzhiyun 		BE_WRB_F_SET(wrb_params->features, LSO, 1);
839*4882a593Smuzhiyun 		wrb_params->lso_mss = skb_shinfo(skb)->gso_size;
840*4882a593Smuzhiyun 		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
841*4882a593Smuzhiyun 			BE_WRB_F_SET(wrb_params->features, LSO6, 1);
842*4882a593Smuzhiyun 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
843*4882a593Smuzhiyun 		if (skb->encapsulation) {
844*4882a593Smuzhiyun 			BE_WRB_F_SET(wrb_params->features, IPCS, 1);
845*4882a593Smuzhiyun 			proto = skb_inner_ip_proto(skb);
846*4882a593Smuzhiyun 		} else {
847*4882a593Smuzhiyun 			proto = skb_ip_proto(skb);
848*4882a593Smuzhiyun 		}
849*4882a593Smuzhiyun 		if (proto == IPPROTO_TCP)
850*4882a593Smuzhiyun 			BE_WRB_F_SET(wrb_params->features, TCPCS, 1);
851*4882a593Smuzhiyun 		else if (proto == IPPROTO_UDP)
852*4882a593Smuzhiyun 			BE_WRB_F_SET(wrb_params->features, UDPCS, 1);
853*4882a593Smuzhiyun 	}
854*4882a593Smuzhiyun 
855*4882a593Smuzhiyun 	if (skb_vlan_tag_present(skb)) {
856*4882a593Smuzhiyun 		BE_WRB_F_SET(wrb_params->features, VLAN, 1);
857*4882a593Smuzhiyun 		wrb_params->vlan_tag = be_get_tx_vlan_tag(adapter, skb);
858*4882a593Smuzhiyun 	}
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun 	BE_WRB_F_SET(wrb_params->features, CRC, 1);
861*4882a593Smuzhiyun }
862*4882a593Smuzhiyun 
863*4882a593Smuzhiyun static void wrb_fill_hdr(struct be_adapter *adapter,
864*4882a593Smuzhiyun 			 struct be_eth_hdr_wrb *hdr,
865*4882a593Smuzhiyun 			 struct be_wrb_params *wrb_params,
866*4882a593Smuzhiyun 			 struct sk_buff *skb)
867*4882a593Smuzhiyun {
868*4882a593Smuzhiyun 	memset(hdr, 0, sizeof(*hdr));
869*4882a593Smuzhiyun 
870*4882a593Smuzhiyun 	SET_TX_WRB_HDR_BITS(crc, hdr,
871*4882a593Smuzhiyun 			    BE_WRB_F_GET(wrb_params->features, CRC));
872*4882a593Smuzhiyun 	SET_TX_WRB_HDR_BITS(ipcs, hdr,
873*4882a593Smuzhiyun 			    BE_WRB_F_GET(wrb_params->features, IPCS));
874*4882a593Smuzhiyun 	SET_TX_WRB_HDR_BITS(tcpcs, hdr,
875*4882a593Smuzhiyun 			    BE_WRB_F_GET(wrb_params->features, TCPCS));
876*4882a593Smuzhiyun 	SET_TX_WRB_HDR_BITS(udpcs, hdr,
877*4882a593Smuzhiyun 			    BE_WRB_F_GET(wrb_params->features, UDPCS));
878*4882a593Smuzhiyun 
879*4882a593Smuzhiyun 	SET_TX_WRB_HDR_BITS(lso, hdr,
880*4882a593Smuzhiyun 			    BE_WRB_F_GET(wrb_params->features, LSO));
881*4882a593Smuzhiyun 	SET_TX_WRB_HDR_BITS(lso6, hdr,
882*4882a593Smuzhiyun 			    BE_WRB_F_GET(wrb_params->features, LSO6));
883*4882a593Smuzhiyun 	SET_TX_WRB_HDR_BITS(lso_mss, hdr, wrb_params->lso_mss);
884*4882a593Smuzhiyun 
885*4882a593Smuzhiyun 	/* Hack to skip HW VLAN tagging needs evt = 1, compl = 0. When this
886*4882a593Smuzhiyun 	 * hack is not needed, the evt bit is set while ringing DB.
887*4882a593Smuzhiyun 	 */
888*4882a593Smuzhiyun 	SET_TX_WRB_HDR_BITS(event, hdr,
889*4882a593Smuzhiyun 			    BE_WRB_F_GET(wrb_params->features, VLAN_SKIP_HW));
890*4882a593Smuzhiyun 	SET_TX_WRB_HDR_BITS(vlan, hdr,
891*4882a593Smuzhiyun 			    BE_WRB_F_GET(wrb_params->features, VLAN));
892*4882a593Smuzhiyun 	SET_TX_WRB_HDR_BITS(vlan_tag, hdr, wrb_params->vlan_tag);
893*4882a593Smuzhiyun 
894*4882a593Smuzhiyun 	SET_TX_WRB_HDR_BITS(num_wrb, hdr, skb_wrb_cnt(skb));
895*4882a593Smuzhiyun 	SET_TX_WRB_HDR_BITS(len, hdr, skb->len);
896*4882a593Smuzhiyun 	SET_TX_WRB_HDR_BITS(mgmt, hdr,
897*4882a593Smuzhiyun 			    BE_WRB_F_GET(wrb_params->features, OS2BMC));
898*4882a593Smuzhiyun }
899*4882a593Smuzhiyun 
900*4882a593Smuzhiyun static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
901*4882a593Smuzhiyun 			  bool unmap_single)
902*4882a593Smuzhiyun {
903*4882a593Smuzhiyun 	dma_addr_t dma;
904*4882a593Smuzhiyun 	u32 frag_len = le32_to_cpu(wrb->frag_len);
905*4882a593Smuzhiyun 
906*4882a593Smuzhiyun 
907*4882a593Smuzhiyun 	dma = (u64)le32_to_cpu(wrb->frag_pa_hi) << 32 |
908*4882a593Smuzhiyun 		(u64)le32_to_cpu(wrb->frag_pa_lo);
909*4882a593Smuzhiyun 	if (frag_len) {
910*4882a593Smuzhiyun 		if (unmap_single)
911*4882a593Smuzhiyun 			dma_unmap_single(dev, dma, frag_len, DMA_TO_DEVICE);
912*4882a593Smuzhiyun 		else
913*4882a593Smuzhiyun 			dma_unmap_page(dev, dma, frag_len, DMA_TO_DEVICE);
914*4882a593Smuzhiyun 	}
915*4882a593Smuzhiyun }
916*4882a593Smuzhiyun 
917*4882a593Smuzhiyun /* Grab a WRB header for xmit */
918*4882a593Smuzhiyun static u32 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
919*4882a593Smuzhiyun {
920*4882a593Smuzhiyun 	u32 head = txo->q.head;
921*4882a593Smuzhiyun 
922*4882a593Smuzhiyun 	queue_head_inc(&txo->q);
923*4882a593Smuzhiyun 	return head;
924*4882a593Smuzhiyun }
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun /* Set up the WRB header for xmit */
927*4882a593Smuzhiyun static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
928*4882a593Smuzhiyun 				struct be_tx_obj *txo,
929*4882a593Smuzhiyun 				struct be_wrb_params *wrb_params,
930*4882a593Smuzhiyun 				struct sk_buff *skb, u16 head)
931*4882a593Smuzhiyun {
932*4882a593Smuzhiyun 	u32 num_frags = skb_wrb_cnt(skb);
933*4882a593Smuzhiyun 	struct be_queue_info *txq = &txo->q;
934*4882a593Smuzhiyun 	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
935*4882a593Smuzhiyun 
936*4882a593Smuzhiyun 	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
937*4882a593Smuzhiyun 	be_dws_cpu_to_le(hdr, sizeof(*hdr));
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun 	BUG_ON(txo->sent_skb_list[head]);
940*4882a593Smuzhiyun 	txo->sent_skb_list[head] = skb;
941*4882a593Smuzhiyun 	txo->last_req_hdr = head;
942*4882a593Smuzhiyun 	atomic_add(num_frags, &txq->used);
943*4882a593Smuzhiyun 	txo->last_req_wrb_cnt = num_frags;
944*4882a593Smuzhiyun 	txo->pend_wrb_cnt += num_frags;
945*4882a593Smuzhiyun }
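/* last_req_hdr/last_req_wrb_cnt remember where this packet's header WRB lives
 * and how many WRBs it used, while pend_wrb_cnt counts WRBs queued but not yet
 * announced to the HW, so that several packets can later be flushed to the TX
 * doorbell in a single write.
 */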
946*4882a593Smuzhiyun 
947*4882a593Smuzhiyun /* Setup a WRB fragment (buffer descriptor) for xmit */
948*4882a593Smuzhiyun static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
949*4882a593Smuzhiyun 				 int len)
950*4882a593Smuzhiyun {
951*4882a593Smuzhiyun 	struct be_eth_wrb *wrb;
952*4882a593Smuzhiyun 	struct be_queue_info *txq = &txo->q;
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun 	wrb = queue_head_node(txq);
955*4882a593Smuzhiyun 	wrb_fill(wrb, busaddr, len);
956*4882a593Smuzhiyun 	queue_head_inc(txq);
957*4882a593Smuzhiyun }
958*4882a593Smuzhiyun 
959*4882a593Smuzhiyun /* Bring the queue back to the state it was in before be_xmit_enqueue() routine
960*4882a593Smuzhiyun  * was invoked. The producer index is restored to the previous packet and the
961*4882a593Smuzhiyun  * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
962*4882a593Smuzhiyun  */
963*4882a593Smuzhiyun static void be_xmit_restore(struct be_adapter *adapter,
964*4882a593Smuzhiyun 			    struct be_tx_obj *txo, u32 head, bool map_single,
965*4882a593Smuzhiyun 			    u32 copied)
966*4882a593Smuzhiyun {
967*4882a593Smuzhiyun 	struct device *dev;
968*4882a593Smuzhiyun 	struct be_eth_wrb *wrb;
969*4882a593Smuzhiyun 	struct be_queue_info *txq = &txo->q;
970*4882a593Smuzhiyun 
971*4882a593Smuzhiyun 	dev = &adapter->pdev->dev;
972*4882a593Smuzhiyun 	txq->head = head;
973*4882a593Smuzhiyun 
974*4882a593Smuzhiyun 	/* skip the first wrb (hdr); it's not mapped */
975*4882a593Smuzhiyun 	queue_head_inc(txq);
976*4882a593Smuzhiyun 	while (copied) {
977*4882a593Smuzhiyun 		wrb = queue_head_node(txq);
978*4882a593Smuzhiyun 		unmap_tx_frag(dev, wrb, map_single);
979*4882a593Smuzhiyun 		map_single = false;
980*4882a593Smuzhiyun 		copied -= le32_to_cpu(wrb->frag_len);
981*4882a593Smuzhiyun 		queue_head_inc(txq);
982*4882a593Smuzhiyun 	}
983*4882a593Smuzhiyun 
984*4882a593Smuzhiyun 	txq->head = head;
985*4882a593Smuzhiyun }
986*4882a593Smuzhiyun 
987*4882a593Smuzhiyun /* Enqueue the given packet for transmit. This routine allocates WRBs for the
988*4882a593Smuzhiyun  * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
989*4882a593Smuzhiyun  * of WRBs used up by the packet.
990*4882a593Smuzhiyun  */
991*4882a593Smuzhiyun static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
992*4882a593Smuzhiyun 			   struct sk_buff *skb,
993*4882a593Smuzhiyun 			   struct be_wrb_params *wrb_params)
994*4882a593Smuzhiyun {
995*4882a593Smuzhiyun 	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
996*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
997*4882a593Smuzhiyun 	bool map_single = false;
998*4882a593Smuzhiyun 	u32 head;
999*4882a593Smuzhiyun 	dma_addr_t busaddr;
1000*4882a593Smuzhiyun 	int len;
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun 	head = be_tx_get_wrb_hdr(txo);
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun 	if (skb->len > skb->data_len) {
1005*4882a593Smuzhiyun 		len = skb_headlen(skb);
1006*4882a593Smuzhiyun 
1007*4882a593Smuzhiyun 		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
1008*4882a593Smuzhiyun 		if (dma_mapping_error(dev, busaddr))
1009*4882a593Smuzhiyun 			goto dma_err;
1010*4882a593Smuzhiyun 		map_single = true;
1011*4882a593Smuzhiyun 		be_tx_setup_wrb_frag(txo, busaddr, len);
1012*4882a593Smuzhiyun 		copied += len;
1013*4882a593Smuzhiyun 	}
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1016*4882a593Smuzhiyun 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1017*4882a593Smuzhiyun 		len = skb_frag_size(frag);
1018*4882a593Smuzhiyun 
1019*4882a593Smuzhiyun 		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
1020*4882a593Smuzhiyun 		if (dma_mapping_error(dev, busaddr))
1021*4882a593Smuzhiyun 			goto dma_err;
1022*4882a593Smuzhiyun 		be_tx_setup_wrb_frag(txo, busaddr, len);
1023*4882a593Smuzhiyun 		copied += len;
1024*4882a593Smuzhiyun 	}
1025*4882a593Smuzhiyun 
1026*4882a593Smuzhiyun 	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
1027*4882a593Smuzhiyun 
1028*4882a593Smuzhiyun 	be_tx_stats_update(txo, skb);
1029*4882a593Smuzhiyun 	return wrb_cnt;
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun dma_err:
1032*4882a593Smuzhiyun 	adapter->drv_stats.dma_map_errors++;
1033*4882a593Smuzhiyun 	be_xmit_restore(adapter, txo, head, map_single, copied);
1034*4882a593Smuzhiyun 	return 0;
1035*4882a593Smuzhiyun }
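/* Resulting ring layout for an skb with linear data and two fragments
 * (illustrative): [hdr WRB][linear buf WRB][frag 0 WRB][frag 1 WRB].
 * The header slot is reserved first (be_tx_get_wrb_hdr) but filled in last
 * (be_tx_setup_wrb_hdr), once the final WRB count is known; on a DMA mapping
 * error the whole packet is unwound via be_xmit_restore().
 */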
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun static inline int qnq_async_evt_rcvd(struct be_adapter *adapter)
1038*4882a593Smuzhiyun {
1039*4882a593Smuzhiyun 	return adapter->flags & BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
1040*4882a593Smuzhiyun }
1041*4882a593Smuzhiyun 
1042*4882a593Smuzhiyun static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
1043*4882a593Smuzhiyun 					     struct sk_buff *skb,
1044*4882a593Smuzhiyun 					     struct be_wrb_params
1045*4882a593Smuzhiyun 					     *wrb_params)
1046*4882a593Smuzhiyun {
1047*4882a593Smuzhiyun 	bool insert_vlan = false;
1048*4882a593Smuzhiyun 	u16 vlan_tag = 0;
1049*4882a593Smuzhiyun 
1050*4882a593Smuzhiyun 	skb = skb_share_check(skb, GFP_ATOMIC);
1051*4882a593Smuzhiyun 	if (unlikely(!skb))
1052*4882a593Smuzhiyun 		return skb;
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 	if (skb_vlan_tag_present(skb)) {
1055*4882a593Smuzhiyun 		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
1056*4882a593Smuzhiyun 		insert_vlan = true;
1057*4882a593Smuzhiyun 	}
1058*4882a593Smuzhiyun 
1059*4882a593Smuzhiyun 	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
1060*4882a593Smuzhiyun 		if (!insert_vlan) {
1061*4882a593Smuzhiyun 			vlan_tag = adapter->pvid;
1062*4882a593Smuzhiyun 			insert_vlan = true;
1063*4882a593Smuzhiyun 		}
1064*4882a593Smuzhiyun 		/* f/w workaround: setting skip_hw_vlan = 1 informs the F/W to
1065*4882a593Smuzhiyun 		 * skip VLAN insertion
1066*4882a593Smuzhiyun 		 */
1067*4882a593Smuzhiyun 		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
1068*4882a593Smuzhiyun 	}
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun 	if (insert_vlan) {
1071*4882a593Smuzhiyun 		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1072*4882a593Smuzhiyun 						vlan_tag);
1073*4882a593Smuzhiyun 		if (unlikely(!skb))
1074*4882a593Smuzhiyun 			return skb;
1075*4882a593Smuzhiyun 		__vlan_hwaccel_clear_tag(skb);
1076*4882a593Smuzhiyun 	}
1077*4882a593Smuzhiyun 
1078*4882a593Smuzhiyun 	/* Insert the outer VLAN, if any */
1079*4882a593Smuzhiyun 	if (adapter->qnq_vid) {
1080*4882a593Smuzhiyun 		vlan_tag = adapter->qnq_vid;
1081*4882a593Smuzhiyun 		skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q),
1082*4882a593Smuzhiyun 						vlan_tag);
1083*4882a593Smuzhiyun 		if (unlikely(!skb))
1084*4882a593Smuzhiyun 			return skb;
1085*4882a593Smuzhiyun 		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
1086*4882a593Smuzhiyun 	}
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 	return skb;
1089*4882a593Smuzhiyun }
1090*4882a593Smuzhiyun 
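/* Returns true for IPv6 packets whose next header is neither TCP nor UDP
 * and whose first extension header reports hdrlen == 0xff; such packets
 * can trigger the BE3 TX stall worked around below.
 */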
1091*4882a593Smuzhiyun static bool be_ipv6_exthdr_check(struct sk_buff *skb)
1092*4882a593Smuzhiyun {
1093*4882a593Smuzhiyun 	struct ethhdr *eh = (struct ethhdr *)skb->data;
1094*4882a593Smuzhiyun 	u16 offset = ETH_HLEN;
1095*4882a593Smuzhiyun 
1096*4882a593Smuzhiyun 	if (eh->h_proto == htons(ETH_P_IPV6)) {
1097*4882a593Smuzhiyun 		struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
1098*4882a593Smuzhiyun 
1099*4882a593Smuzhiyun 		offset += sizeof(struct ipv6hdr);
1100*4882a593Smuzhiyun 		if (ip6h->nexthdr != NEXTHDR_TCP &&
1101*4882a593Smuzhiyun 		    ip6h->nexthdr != NEXTHDR_UDP) {
1102*4882a593Smuzhiyun 			struct ipv6_opt_hdr *ehdr =
1103*4882a593Smuzhiyun 				(struct ipv6_opt_hdr *)(skb->data + offset);
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 			/* offending pkt: 2nd byte following IPv6 hdr is 0xff */
1106*4882a593Smuzhiyun 			if (ehdr->hdrlen == 0xff)
1107*4882a593Smuzhiyun 				return true;
1108*4882a593Smuzhiyun 		}
1109*4882a593Smuzhiyun 	}
1110*4882a593Smuzhiyun 	return false;
1111*4882a593Smuzhiyun }
1112*4882a593Smuzhiyun 
1113*4882a593Smuzhiyun static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
1114*4882a593Smuzhiyun {
1115*4882a593Smuzhiyun 	return skb_vlan_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
1116*4882a593Smuzhiyun }
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
1119*4882a593Smuzhiyun {
1120*4882a593Smuzhiyun 	return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
1121*4882a593Smuzhiyun }
1122*4882a593Smuzhiyun 
1123*4882a593Smuzhiyun static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
1124*4882a593Smuzhiyun 						  struct sk_buff *skb,
1125*4882a593Smuzhiyun 						  struct be_wrb_params
1126*4882a593Smuzhiyun 						  *wrb_params)
1127*4882a593Smuzhiyun {
1128*4882a593Smuzhiyun 	struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
1129*4882a593Smuzhiyun 	unsigned int eth_hdr_len;
1130*4882a593Smuzhiyun 	struct iphdr *ip;
1131*4882a593Smuzhiyun 
1132*4882a593Smuzhiyun 	/* For padded packets, BE HW modifies tot_len field in IP header
1133*4882a593Smuzhiyun 	 * incorrectly when VLAN tag is inserted by HW.
1134*4882a593Smuzhiyun 	 * For padded packets, Lancer computes an incorrect checksum.
1135*4882a593Smuzhiyun 	 */
1136*4882a593Smuzhiyun 	eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
1137*4882a593Smuzhiyun 						VLAN_ETH_HLEN : ETH_HLEN;
1138*4882a593Smuzhiyun 	if (skb->len <= 60 &&
1139*4882a593Smuzhiyun 	    (lancer_chip(adapter) || skb_vlan_tag_present(skb)) &&
1140*4882a593Smuzhiyun 	    is_ipv4_pkt(skb)) {
1141*4882a593Smuzhiyun 		ip = (struct iphdr *)ip_hdr(skb);
1142*4882a593Smuzhiyun 		pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
1143*4882a593Smuzhiyun 	}
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	/* If vlan tag is already inlined in the packet, skip HW VLAN
1146*4882a593Smuzhiyun 	 * tagging in pvid-tagging mode
1147*4882a593Smuzhiyun 	 */
1148*4882a593Smuzhiyun 	if (be_pvid_tagging_enabled(adapter) &&
1149*4882a593Smuzhiyun 	    veh->h_vlan_proto == htons(ETH_P_8021Q))
1150*4882a593Smuzhiyun 		BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1);
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun 	/* HW has a bug wherein it will calculate CSUM for VLAN
1153*4882a593Smuzhiyun 	 * pkts even though it is disabled.
1154*4882a593Smuzhiyun 	 * Manually insert VLAN in pkt.
1155*4882a593Smuzhiyun 	 */
1156*4882a593Smuzhiyun 	if (skb->ip_summed != CHECKSUM_PARTIAL &&
1157*4882a593Smuzhiyun 	    skb_vlan_tag_present(skb)) {
1158*4882a593Smuzhiyun 		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
1159*4882a593Smuzhiyun 		if (unlikely(!skb))
1160*4882a593Smuzhiyun 			goto err;
1161*4882a593Smuzhiyun 	}
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 	/* HW may lockup when VLAN HW tagging is requested on
1164*4882a593Smuzhiyun 	 * certain ipv6 packets. Drop such pkts if the HW workaround to
1165*4882a593Smuzhiyun 	 * skip HW tagging is not enabled by FW.
1166*4882a593Smuzhiyun 	 */
1167*4882a593Smuzhiyun 	if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
1168*4882a593Smuzhiyun 		     (adapter->pvid || adapter->qnq_vid) &&
1169*4882a593Smuzhiyun 		     !qnq_async_evt_rcvd(adapter)))
1170*4882a593Smuzhiyun 		goto tx_drop;
1171*4882a593Smuzhiyun 
1172*4882a593Smuzhiyun 	/* Manual VLAN tag insertion to prevent:
1173*4882a593Smuzhiyun 	 * ASIC lockup when the ASIC inserts VLAN tag into
1174*4882a593Smuzhiyun 	 * certain ipv6 packets. Insert VLAN tags in driver,
1175*4882a593Smuzhiyun 	 * and set event, completion, vlan bits accordingly
1176*4882a593Smuzhiyun 	 * in the Tx WRB.
1177*4882a593Smuzhiyun 	 */
1178*4882a593Smuzhiyun 	if (be_ipv6_tx_stall_chk(adapter, skb) &&
1179*4882a593Smuzhiyun 	    be_vlan_tag_tx_chk(adapter, skb)) {
1180*4882a593Smuzhiyun 		skb = be_insert_vlan_in_pkt(adapter, skb, wrb_params);
1181*4882a593Smuzhiyun 		if (unlikely(!skb))
1182*4882a593Smuzhiyun 			goto err;
1183*4882a593Smuzhiyun 	}
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	return skb;
1186*4882a593Smuzhiyun tx_drop:
1187*4882a593Smuzhiyun 	dev_kfree_skb_any(skb);
1188*4882a593Smuzhiyun err:
1189*4882a593Smuzhiyun 	return NULL;
1190*4882a593Smuzhiyun }
1191*4882a593Smuzhiyun 
1192*4882a593Smuzhiyun static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1193*4882a593Smuzhiyun 					   struct sk_buff *skb,
1194*4882a593Smuzhiyun 					   struct be_wrb_params *wrb_params)
1195*4882a593Smuzhiyun {
1196*4882a593Smuzhiyun 	int err;
1197*4882a593Smuzhiyun 
1198*4882a593Smuzhiyun 	/* Lancer, SH and BE3 in SRIOV mode have a bug wherein
1199*4882a593Smuzhiyun 	 * packets that are 32 bytes or less may cause a transmit stall
1200*4882a593Smuzhiyun 	 * on that port. The workaround is to pad such packets
1201*4882a593Smuzhiyun 	 * (len <= 32 bytes) to a minimum length of 36 bytes.
1202*4882a593Smuzhiyun 	 */
1203*4882a593Smuzhiyun 	if (skb->len <= 32) {
1204*4882a593Smuzhiyun 		if (skb_put_padto(skb, 36))
1205*4882a593Smuzhiyun 			return NULL;
1206*4882a593Smuzhiyun 	}
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun 	if (BEx_chip(adapter) || lancer_chip(adapter)) {
1209*4882a593Smuzhiyun 		skb = be_lancer_xmit_workarounds(adapter, skb, wrb_params);
1210*4882a593Smuzhiyun 		if (!skb)
1211*4882a593Smuzhiyun 			return NULL;
1212*4882a593Smuzhiyun 	}
1213*4882a593Smuzhiyun 
1214*4882a593Smuzhiyun 	/* The stack can send us skbs with length greater than
1215*4882a593Smuzhiyun 	 * what the HW can handle. Trim the extra bytes.
1216*4882a593Smuzhiyun 	 */
1217*4882a593Smuzhiyun 	WARN_ON_ONCE(skb->len > BE_MAX_GSO_SIZE);
1218*4882a593Smuzhiyun 	err = pskb_trim(skb, BE_MAX_GSO_SIZE);
1219*4882a593Smuzhiyun 	WARN_ON(err);
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	return skb;
1222*4882a593Smuzhiyun }
1223*4882a593Smuzhiyun 
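/* Rings the TX doorbell for all WRBs queued so far on this TX object.
 * The last request is marked eventable and, on non-Lancer chips, a dummy
 * WRB is added when the pending count is odd so that an even number of
 * WRBs is notified to the HW.
 */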
1224*4882a593Smuzhiyun static void be_xmit_flush(struct be_adapter *adapter, struct be_tx_obj *txo)
1225*4882a593Smuzhiyun {
1226*4882a593Smuzhiyun 	struct be_queue_info *txq = &txo->q;
1227*4882a593Smuzhiyun 	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, txo->last_req_hdr);
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 	/* Mark the last request eventable if it hasn't been marked already */
1230*4882a593Smuzhiyun 	if (!(hdr->dw[2] & cpu_to_le32(TX_HDR_WRB_EVT)))
1231*4882a593Smuzhiyun 		hdr->dw[2] |= cpu_to_le32(TX_HDR_WRB_EVT | TX_HDR_WRB_COMPL);
1232*4882a593Smuzhiyun 
1233*4882a593Smuzhiyun 	/* compose a dummy wrb if there is an odd number of wrbs to notify */
1234*4882a593Smuzhiyun 	if (!lancer_chip(adapter) && (txo->pend_wrb_cnt & 1)) {
1235*4882a593Smuzhiyun 		wrb_fill_dummy(queue_head_node(txq));
1236*4882a593Smuzhiyun 		queue_head_inc(txq);
1237*4882a593Smuzhiyun 		atomic_inc(&txq->used);
1238*4882a593Smuzhiyun 		txo->pend_wrb_cnt++;
1239*4882a593Smuzhiyun 		hdr->dw[2] &= ~cpu_to_le32(TX_HDR_WRB_NUM_MASK <<
1240*4882a593Smuzhiyun 					   TX_HDR_WRB_NUM_SHIFT);
1241*4882a593Smuzhiyun 		hdr->dw[2] |= cpu_to_le32((txo->last_req_wrb_cnt + 1) <<
1242*4882a593Smuzhiyun 					  TX_HDR_WRB_NUM_SHIFT);
1243*4882a593Smuzhiyun 	}
1244*4882a593Smuzhiyun 	be_txq_notify(adapter, txo, txo->pend_wrb_cnt);
1245*4882a593Smuzhiyun 	txo->pend_wrb_cnt = 0;
1246*4882a593Smuzhiyun }
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun /* OS2BMC related */
1249*4882a593Smuzhiyun 
1250*4882a593Smuzhiyun #define DHCP_CLIENT_PORT	68
1251*4882a593Smuzhiyun #define DHCP_SERVER_PORT	67
1252*4882a593Smuzhiyun #define NET_BIOS_PORT1		137
1253*4882a593Smuzhiyun #define NET_BIOS_PORT2		138
1254*4882a593Smuzhiyun #define DHCPV6_RAS_PORT		547
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun #define is_mc_allowed_on_bmc(adapter, eh)	\
1257*4882a593Smuzhiyun 	(!is_multicast_filt_enabled(adapter) &&	\
1258*4882a593Smuzhiyun 	 is_multicast_ether_addr(eh->h_dest) &&	\
1259*4882a593Smuzhiyun 	 !is_broadcast_ether_addr(eh->h_dest))
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun #define is_bc_allowed_on_bmc(adapter, eh)	\
1262*4882a593Smuzhiyun 	(!is_broadcast_filt_enabled(adapter) &&	\
1263*4882a593Smuzhiyun 	 is_broadcast_ether_addr(eh->h_dest))
1264*4882a593Smuzhiyun 
1265*4882a593Smuzhiyun #define is_arp_allowed_on_bmc(adapter, skb)	\
1266*4882a593Smuzhiyun 	(is_arp(skb) && is_arp_filt_enabled(adapter))
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun #define is_arp(skb)	(skb->protocol == htons(ETH_P_ARP))
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun #define is_arp_filt_enabled(adapter)	\
1271*4882a593Smuzhiyun 		(adapter->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun #define is_dhcp_client_filt_enabled(adapter)	\
1274*4882a593Smuzhiyun 		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun #define is_dhcp_srvr_filt_enabled(adapter)	\
1277*4882a593Smuzhiyun 		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
1278*4882a593Smuzhiyun 
1279*4882a593Smuzhiyun #define is_nbios_filt_enabled(adapter)	\
1280*4882a593Smuzhiyun 		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun #define is_ipv6_na_filt_enabled(adapter)	\
1283*4882a593Smuzhiyun 		(adapter->bmc_filt_mask &	\
1284*4882a593Smuzhiyun 			BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun #define is_ipv6_ra_filt_enabled(adapter)	\
1287*4882a593Smuzhiyun 		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun #define is_ipv6_ras_filt_enabled(adapter)	\
1290*4882a593Smuzhiyun 		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
1291*4882a593Smuzhiyun 
1292*4882a593Smuzhiyun #define is_broadcast_filt_enabled(adapter)	\
1293*4882a593Smuzhiyun 		(adapter->bmc_filt_mask & BMC_FILT_BROADCAST)
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun #define is_multicast_filt_enabled(adapter)	\
1296*4882a593Smuzhiyun 		(adapter->bmc_filt_mask & BMC_FILT_MULTICAST)
1297*4882a593Smuzhiyun 
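/* Decides whether a copy of this multicast/broadcast packet must also be
 * sent to the BMC, based on the adapter's BMC filtering mask (ARP, DHCP,
 * NetBIOS, IPv6 ND/RA, etc.). When it must, any VLAN tag is inlined into
 * the packet, as the ASIC expects for BMC-bound traffic.
 */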
1298*4882a593Smuzhiyun static bool be_send_pkt_to_bmc(struct be_adapter *adapter,
1299*4882a593Smuzhiyun 			       struct sk_buff **skb)
1300*4882a593Smuzhiyun {
1301*4882a593Smuzhiyun 	struct ethhdr *eh = (struct ethhdr *)(*skb)->data;
1302*4882a593Smuzhiyun 	bool os2bmc = false;
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 	if (!be_is_os2bmc_enabled(adapter))
1305*4882a593Smuzhiyun 		goto done;
1306*4882a593Smuzhiyun 
1307*4882a593Smuzhiyun 	if (!is_multicast_ether_addr(eh->h_dest))
1308*4882a593Smuzhiyun 		goto done;
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 	if (is_mc_allowed_on_bmc(adapter, eh) ||
1311*4882a593Smuzhiyun 	    is_bc_allowed_on_bmc(adapter, eh) ||
1312*4882a593Smuzhiyun 	    is_arp_allowed_on_bmc(adapter, (*skb))) {
1313*4882a593Smuzhiyun 		os2bmc = true;
1314*4882a593Smuzhiyun 		goto done;
1315*4882a593Smuzhiyun 	}
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	if ((*skb)->protocol == htons(ETH_P_IPV6)) {
1318*4882a593Smuzhiyun 		struct ipv6hdr *hdr = ipv6_hdr((*skb));
1319*4882a593Smuzhiyun 		u8 nexthdr = hdr->nexthdr;
1320*4882a593Smuzhiyun 
1321*4882a593Smuzhiyun 		if (nexthdr == IPPROTO_ICMPV6) {
1322*4882a593Smuzhiyun 			struct icmp6hdr *icmp6 = icmp6_hdr((*skb));
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 			switch (icmp6->icmp6_type) {
1325*4882a593Smuzhiyun 			case NDISC_ROUTER_ADVERTISEMENT:
1326*4882a593Smuzhiyun 				os2bmc = is_ipv6_ra_filt_enabled(adapter);
1327*4882a593Smuzhiyun 				goto done;
1328*4882a593Smuzhiyun 			case NDISC_NEIGHBOUR_ADVERTISEMENT:
1329*4882a593Smuzhiyun 				os2bmc = is_ipv6_na_filt_enabled(adapter);
1330*4882a593Smuzhiyun 				goto done;
1331*4882a593Smuzhiyun 			default:
1332*4882a593Smuzhiyun 				break;
1333*4882a593Smuzhiyun 			}
1334*4882a593Smuzhiyun 		}
1335*4882a593Smuzhiyun 	}
1336*4882a593Smuzhiyun 
1337*4882a593Smuzhiyun 	if (is_udp_pkt((*skb))) {
1338*4882a593Smuzhiyun 		struct udphdr *udp = udp_hdr((*skb));
1339*4882a593Smuzhiyun 
1340*4882a593Smuzhiyun 		switch (ntohs(udp->dest)) {
1341*4882a593Smuzhiyun 		case DHCP_CLIENT_PORT:
1342*4882a593Smuzhiyun 			os2bmc = is_dhcp_client_filt_enabled(adapter);
1343*4882a593Smuzhiyun 			goto done;
1344*4882a593Smuzhiyun 		case DHCP_SERVER_PORT:
1345*4882a593Smuzhiyun 			os2bmc = is_dhcp_srvr_filt_enabled(adapter);
1346*4882a593Smuzhiyun 			goto done;
1347*4882a593Smuzhiyun 		case NET_BIOS_PORT1:
1348*4882a593Smuzhiyun 		case NET_BIOS_PORT2:
1349*4882a593Smuzhiyun 			os2bmc = is_nbios_filt_enabled(adapter);
1350*4882a593Smuzhiyun 			goto done;
1351*4882a593Smuzhiyun 		case DHCPV6_RAS_PORT:
1352*4882a593Smuzhiyun 			os2bmc = is_ipv6_ras_filt_enabled(adapter);
1353*4882a593Smuzhiyun 			goto done;
1354*4882a593Smuzhiyun 		default:
1355*4882a593Smuzhiyun 			break;
1356*4882a593Smuzhiyun 		}
1357*4882a593Smuzhiyun 	}
1358*4882a593Smuzhiyun done:
1359*4882a593Smuzhiyun 	/* For VLAN packets destined to the BMC, the ASIC expects
1360*4882a593Smuzhiyun 	 * the VLAN tag to be inline in the packet.
1361*4882a593Smuzhiyun 	 */
1362*4882a593Smuzhiyun 	if (os2bmc)
1363*4882a593Smuzhiyun 		*skb = be_insert_vlan_in_pkt(adapter, *skb, NULL);
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	return os2bmc;
1366*4882a593Smuzhiyun }
1367*4882a593Smuzhiyun 
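/* Main transmit entry point: applies the chip-specific workarounds,
 * enqueues the WRBs for the packet, optionally enqueues a second copy for
 * the BMC, stops the subqueue when the TXQ fills up and rings the doorbell
 * when no further packets are pending from the stack.
 */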
1368*4882a593Smuzhiyun static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1369*4882a593Smuzhiyun {
1370*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
1371*4882a593Smuzhiyun 	u16 q_idx = skb_get_queue_mapping(skb);
1372*4882a593Smuzhiyun 	struct be_tx_obj *txo = &adapter->tx_obj[q_idx];
1373*4882a593Smuzhiyun 	struct be_wrb_params wrb_params = { 0 };
1374*4882a593Smuzhiyun 	bool flush = !netdev_xmit_more();
1375*4882a593Smuzhiyun 	u16 wrb_cnt;
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	skb = be_xmit_workarounds(adapter, skb, &wrb_params);
1378*4882a593Smuzhiyun 	if (unlikely(!skb))
1379*4882a593Smuzhiyun 		goto drop;
1380*4882a593Smuzhiyun 
1381*4882a593Smuzhiyun 	be_get_wrb_params_from_skb(adapter, skb, &wrb_params);
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun 	wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1384*4882a593Smuzhiyun 	if (unlikely(!wrb_cnt)) {
1385*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
1386*4882a593Smuzhiyun 		goto drop;
1387*4882a593Smuzhiyun 	}
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun 	/* if os2bmc is enabled and if the pkt is destined to bmc,
1390*4882a593Smuzhiyun 	 * enqueue the pkt a 2nd time with mgmt bit set.
1391*4882a593Smuzhiyun 	 */
1392*4882a593Smuzhiyun 	if (be_send_pkt_to_bmc(adapter, &skb)) {
1393*4882a593Smuzhiyun 		BE_WRB_F_SET(wrb_params.features, OS2BMC, 1);
1394*4882a593Smuzhiyun 		wrb_cnt = be_xmit_enqueue(adapter, txo, skb, &wrb_params);
1395*4882a593Smuzhiyun 		if (unlikely(!wrb_cnt))
1396*4882a593Smuzhiyun 			goto drop;
1397*4882a593Smuzhiyun 		else
1398*4882a593Smuzhiyun 			skb_get(skb);
1399*4882a593Smuzhiyun 	}
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 	if (be_is_txq_full(txo)) {
1402*4882a593Smuzhiyun 		netif_stop_subqueue(netdev, q_idx);
1403*4882a593Smuzhiyun 		tx_stats(txo)->tx_stops++;
1404*4882a593Smuzhiyun 	}
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	if (flush || __netif_subqueue_stopped(netdev, q_idx))
1407*4882a593Smuzhiyun 		be_xmit_flush(adapter, txo);
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 	return NETDEV_TX_OK;
1410*4882a593Smuzhiyun drop:
1411*4882a593Smuzhiyun 	tx_stats(txo)->tx_drv_drops++;
1412*4882a593Smuzhiyun 	/* Flush the already enqueued tx requests */
1413*4882a593Smuzhiyun 	if (flush && txo->pend_wrb_cnt)
1414*4882a593Smuzhiyun 		be_xmit_flush(adapter, txo);
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 	return NETDEV_TX_OK;
1417*4882a593Smuzhiyun }
1418*4882a593Smuzhiyun 
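/* TX timeout handler: dumps the non-zero TXQ and TX-CQ entries along with
 * the L4 headers of the skbs still on the sent list, and on Lancer chips
 * triggers a FW reset to recover the port.
 */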
1419*4882a593Smuzhiyun static void be_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1420*4882a593Smuzhiyun {
1421*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
1422*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
1423*4882a593Smuzhiyun 	struct be_tx_obj *txo;
1424*4882a593Smuzhiyun 	struct sk_buff *skb;
1425*4882a593Smuzhiyun 	struct tcphdr *tcphdr;
1426*4882a593Smuzhiyun 	struct udphdr *udphdr;
1427*4882a593Smuzhiyun 	u32 *entry;
1428*4882a593Smuzhiyun 	int status;
1429*4882a593Smuzhiyun 	int i, j;
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 	for_all_tx_queues(adapter, txo, i) {
1432*4882a593Smuzhiyun 		dev_info(dev, "TXQ Dump: %d H: %d T: %d used: %d, qid: 0x%x\n",
1433*4882a593Smuzhiyun 			 i, txo->q.head, txo->q.tail,
1434*4882a593Smuzhiyun 			 atomic_read(&txo->q.used), txo->q.id);
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 		entry = txo->q.dma_mem.va;
1437*4882a593Smuzhiyun 		for (j = 0; j < TX_Q_LEN * 4; j += 4) {
1438*4882a593Smuzhiyun 			if (entry[j] != 0 || entry[j + 1] != 0 ||
1439*4882a593Smuzhiyun 			    entry[j + 2] != 0 || entry[j + 3] != 0) {
1440*4882a593Smuzhiyun 				dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
1441*4882a593Smuzhiyun 					 j, entry[j], entry[j + 1],
1442*4882a593Smuzhiyun 					 entry[j + 2], entry[j + 3]);
1443*4882a593Smuzhiyun 			}
1444*4882a593Smuzhiyun 		}
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun 		entry = txo->cq.dma_mem.va;
1447*4882a593Smuzhiyun 		dev_info(dev, "TXCQ Dump: %d  H: %d T: %d used: %d\n",
1448*4882a593Smuzhiyun 			 i, txo->cq.head, txo->cq.tail,
1449*4882a593Smuzhiyun 			 atomic_read(&txo->cq.used));
1450*4882a593Smuzhiyun 		for (j = 0; j < TX_CQ_LEN * 4; j += 4) {
1451*4882a593Smuzhiyun 			if (entry[j] != 0 || entry[j + 1] != 0 ||
1452*4882a593Smuzhiyun 			    entry[j + 2] != 0 || entry[j + 3] != 0) {
1453*4882a593Smuzhiyun 				dev_info(dev, "Entry %d 0x%x 0x%x 0x%x 0x%x\n",
1454*4882a593Smuzhiyun 					 j, entry[j], entry[j + 1],
1455*4882a593Smuzhiyun 					 entry[j + 2], entry[j + 3]);
1456*4882a593Smuzhiyun 			}
1457*4882a593Smuzhiyun 		}
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 		for (j = 0; j < TX_Q_LEN; j++) {
1460*4882a593Smuzhiyun 			if (txo->sent_skb_list[j]) {
1461*4882a593Smuzhiyun 				skb = txo->sent_skb_list[j];
1462*4882a593Smuzhiyun 				if (ip_hdr(skb)->protocol == IPPROTO_TCP) {
1463*4882a593Smuzhiyun 					tcphdr = tcp_hdr(skb);
1464*4882a593Smuzhiyun 					dev_info(dev, "TCP source port %d\n",
1465*4882a593Smuzhiyun 						 ntohs(tcphdr->source));
1466*4882a593Smuzhiyun 					dev_info(dev, "TCP dest port %d\n",
1467*4882a593Smuzhiyun 						 ntohs(tcphdr->dest));
1468*4882a593Smuzhiyun 					dev_info(dev, "TCP sequence num %u\n",
1469*4882a593Smuzhiyun 						 ntohl(tcphdr->seq));
1470*4882a593Smuzhiyun 					dev_info(dev, "TCP ack_seq %u\n",
1471*4882a593Smuzhiyun 						 ntohl(tcphdr->ack_seq));
1472*4882a593Smuzhiyun 				} else if (ip_hdr(skb)->protocol ==
1473*4882a593Smuzhiyun 					   IPPROTO_UDP) {
1474*4882a593Smuzhiyun 					udphdr = udp_hdr(skb);
1475*4882a593Smuzhiyun 					dev_info(dev, "UDP source port %d\n",
1476*4882a593Smuzhiyun 						 ntohs(udphdr->source));
1477*4882a593Smuzhiyun 					dev_info(dev, "UDP dest port %d\n",
1478*4882a593Smuzhiyun 						 ntohs(udphdr->dest));
1479*4882a593Smuzhiyun 				}
1480*4882a593Smuzhiyun 				dev_info(dev, "skb[%d] %p len %d proto 0x%x\n",
1481*4882a593Smuzhiyun 					 j, skb, skb->len, skb->protocol);
1482*4882a593Smuzhiyun 			}
1483*4882a593Smuzhiyun 		}
1484*4882a593Smuzhiyun 	}
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun 	if (lancer_chip(adapter)) {
1487*4882a593Smuzhiyun 		dev_info(dev, "Initiating reset due to tx timeout\n");
1488*4882a593Smuzhiyun 		dev_info(dev, "Resetting adapter\n");
1489*4882a593Smuzhiyun 		status = lancer_physdev_ctrl(adapter,
1490*4882a593Smuzhiyun 					     PHYSDEV_CONTROL_FW_RESET_MASK);
1491*4882a593Smuzhiyun 		if (status)
1492*4882a593Smuzhiyun 			dev_err(dev, "Reset failed .. Reboot server\n");
1493*4882a593Smuzhiyun 	}
1494*4882a593Smuzhiyun }
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun static inline bool be_in_all_promisc(struct be_adapter *adapter)
1497*4882a593Smuzhiyun {
1498*4882a593Smuzhiyun 	return (adapter->if_flags & BE_IF_FLAGS_ALL_PROMISCUOUS) ==
1499*4882a593Smuzhiyun 			BE_IF_FLAGS_ALL_PROMISCUOUS;
1500*4882a593Smuzhiyun }
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun static int be_set_vlan_promisc(struct be_adapter *adapter)
1503*4882a593Smuzhiyun {
1504*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
1505*4882a593Smuzhiyun 	int status;
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun 	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS)
1508*4882a593Smuzhiyun 		return 0;
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun 	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, ON);
1511*4882a593Smuzhiyun 	if (!status) {
1512*4882a593Smuzhiyun 		dev_info(dev, "Enabled VLAN promiscuous mode\n");
1513*4882a593Smuzhiyun 		adapter->if_flags |= BE_IF_FLAGS_VLAN_PROMISCUOUS;
1514*4882a593Smuzhiyun 	} else {
1515*4882a593Smuzhiyun 		dev_err(dev, "Failed to enable VLAN promiscuous mode\n");
1516*4882a593Smuzhiyun 	}
1517*4882a593Smuzhiyun 	return status;
1518*4882a593Smuzhiyun }
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun static int be_clear_vlan_promisc(struct be_adapter *adapter)
1521*4882a593Smuzhiyun {
1522*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
1523*4882a593Smuzhiyun 	int status;
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_VLAN_PROMISCUOUS, OFF);
1526*4882a593Smuzhiyun 	if (!status) {
1527*4882a593Smuzhiyun 		dev_info(dev, "Disabling VLAN promiscuous mode\n");
1528*4882a593Smuzhiyun 		adapter->if_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
1529*4882a593Smuzhiyun 	}
1530*4882a593Smuzhiyun 	return status;
1531*4882a593Smuzhiyun }
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun /*
1534*4882a593Smuzhiyun  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1535*4882a593Smuzhiyun  * If the user configures more, place BE in vlan promiscuous mode.
1536*4882a593Smuzhiyun  */
1537*4882a593Smuzhiyun static int be_vid_config(struct be_adapter *adapter)
1538*4882a593Smuzhiyun {
1539*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
1540*4882a593Smuzhiyun 	u16 vids[BE_NUM_VLANS_SUPPORTED];
1541*4882a593Smuzhiyun 	u16 num = 0, i = 0;
1542*4882a593Smuzhiyun 	int status = 0;
1543*4882a593Smuzhiyun 
1544*4882a593Smuzhiyun 	/* No need to change the VLAN state if the I/F is in promiscuous mode */
1545*4882a593Smuzhiyun 	if (adapter->netdev->flags & IFF_PROMISC)
1546*4882a593Smuzhiyun 		return 0;
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun 	if (adapter->vlans_added > be_max_vlans(adapter))
1549*4882a593Smuzhiyun 		return be_set_vlan_promisc(adapter);
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun 	if (adapter->if_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
1552*4882a593Smuzhiyun 		status = be_clear_vlan_promisc(adapter);
1553*4882a593Smuzhiyun 		if (status)
1554*4882a593Smuzhiyun 			return status;
1555*4882a593Smuzhiyun 	}
1556*4882a593Smuzhiyun 	/* Construct VLAN Table to give to HW */
1557*4882a593Smuzhiyun 	for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1558*4882a593Smuzhiyun 		vids[num++] = cpu_to_le16(i);
1559*4882a593Smuzhiyun 
1560*4882a593Smuzhiyun 	status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num, 0);
1561*4882a593Smuzhiyun 	if (status) {
1562*4882a593Smuzhiyun 		dev_err(dev, "Setting HW VLAN filtering failed\n");
1563*4882a593Smuzhiyun 		/* Set to VLAN promisc mode as setting VLAN filter failed */
1564*4882a593Smuzhiyun 		if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_VLANS ||
1565*4882a593Smuzhiyun 		    addl_status(status) ==
1566*4882a593Smuzhiyun 				MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
1567*4882a593Smuzhiyun 			return be_set_vlan_promisc(adapter);
1568*4882a593Smuzhiyun 	}
1569*4882a593Smuzhiyun 	return status;
1570*4882a593Smuzhiyun }
1571*4882a593Smuzhiyun 
1572*4882a593Smuzhiyun static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1573*4882a593Smuzhiyun {
1574*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
1575*4882a593Smuzhiyun 	int status = 0;
1576*4882a593Smuzhiyun 
1577*4882a593Smuzhiyun 	mutex_lock(&adapter->rx_filter_lock);
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun 	/* Packets with VID 0 are always received by Lancer by default */
1580*4882a593Smuzhiyun 	if (lancer_chip(adapter) && vid == 0)
1581*4882a593Smuzhiyun 		goto done;
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun 	if (test_bit(vid, adapter->vids))
1584*4882a593Smuzhiyun 		goto done;
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun 	set_bit(vid, adapter->vids);
1587*4882a593Smuzhiyun 	adapter->vlans_added++;
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 	status = be_vid_config(adapter);
1590*4882a593Smuzhiyun done:
1591*4882a593Smuzhiyun 	mutex_unlock(&adapter->rx_filter_lock);
1592*4882a593Smuzhiyun 	return status;
1593*4882a593Smuzhiyun }
1594*4882a593Smuzhiyun 
1595*4882a593Smuzhiyun static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1596*4882a593Smuzhiyun {
1597*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
1598*4882a593Smuzhiyun 	int status = 0;
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 	mutex_lock(&adapter->rx_filter_lock);
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun 	/* Packets with VID 0 are always received by Lancer by default */
1603*4882a593Smuzhiyun 	if (lancer_chip(adapter) && vid == 0)
1604*4882a593Smuzhiyun 		goto done;
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 	if (!test_bit(vid, adapter->vids))
1607*4882a593Smuzhiyun 		goto done;
1608*4882a593Smuzhiyun 
1609*4882a593Smuzhiyun 	clear_bit(vid, adapter->vids);
1610*4882a593Smuzhiyun 	adapter->vlans_added--;
1611*4882a593Smuzhiyun 
1612*4882a593Smuzhiyun 	status = be_vid_config(adapter);
1613*4882a593Smuzhiyun done:
1614*4882a593Smuzhiyun 	mutex_unlock(&adapter->rx_filter_lock);
1615*4882a593Smuzhiyun 	return status;
1616*4882a593Smuzhiyun }
1617*4882a593Smuzhiyun 
1618*4882a593Smuzhiyun static void be_set_all_promisc(struct be_adapter *adapter)
1619*4882a593Smuzhiyun {
1620*4882a593Smuzhiyun 	be_cmd_rx_filter(adapter, BE_IF_FLAGS_ALL_PROMISCUOUS, ON);
1621*4882a593Smuzhiyun 	adapter->if_flags |= BE_IF_FLAGS_ALL_PROMISCUOUS;
1622*4882a593Smuzhiyun }
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun static void be_set_mc_promisc(struct be_adapter *adapter)
1625*4882a593Smuzhiyun {
1626*4882a593Smuzhiyun 	int status;
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS)
1629*4882a593Smuzhiyun 		return;
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MCAST_PROMISCUOUS, ON);
1632*4882a593Smuzhiyun 	if (!status)
1633*4882a593Smuzhiyun 		adapter->if_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS;
1634*4882a593Smuzhiyun }
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun static void be_set_uc_promisc(struct be_adapter *adapter)
1637*4882a593Smuzhiyun {
1638*4882a593Smuzhiyun 	int status;
1639*4882a593Smuzhiyun 
1640*4882a593Smuzhiyun 	if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS)
1641*4882a593Smuzhiyun 		return;
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, ON);
1644*4882a593Smuzhiyun 	if (!status)
1645*4882a593Smuzhiyun 		adapter->if_flags |= BE_IF_FLAGS_PROMISCUOUS;
1646*4882a593Smuzhiyun }
1647*4882a593Smuzhiyun 
1648*4882a593Smuzhiyun static void be_clear_uc_promisc(struct be_adapter *adapter)
1649*4882a593Smuzhiyun {
1650*4882a593Smuzhiyun 	int status;
1651*4882a593Smuzhiyun 
1652*4882a593Smuzhiyun 	if (!(adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS))
1653*4882a593Smuzhiyun 		return;
1654*4882a593Smuzhiyun 
1655*4882a593Smuzhiyun 	status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_PROMISCUOUS, OFF);
1656*4882a593Smuzhiyun 	if (!status)
1657*4882a593Smuzhiyun 		adapter->if_flags &= ~BE_IF_FLAGS_PROMISCUOUS;
1658*4882a593Smuzhiyun }
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun /* The below 2 functions are the sync/unsync callbacks for __dev_mc_sync()/
1661*4882a593Smuzhiyun  * __dev_uc_sync(). We use a single callback for both sync and unsync. We don't
1662*4882a593Smuzhiyun  * really add/remove addresses through this callback; we only use it to detect
1663*4882a593Smuzhiyun  * changes to the uc/mc lists. The entire uc/mc list is programmed in be_set_rx_mode().
1664*4882a593Smuzhiyun  */
1665*4882a593Smuzhiyun static int be_uc_list_update(struct net_device *netdev,
1666*4882a593Smuzhiyun 			     const unsigned char *addr)
1667*4882a593Smuzhiyun {
1668*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 	adapter->update_uc_list = true;
1671*4882a593Smuzhiyun 	return 0;
1672*4882a593Smuzhiyun }
1673*4882a593Smuzhiyun 
1674*4882a593Smuzhiyun static int be_mc_list_update(struct net_device *netdev,
1675*4882a593Smuzhiyun 			     const unsigned char *addr)
1676*4882a593Smuzhiyun {
1677*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
1678*4882a593Smuzhiyun 
1679*4882a593Smuzhiyun 	adapter->update_mc_list = true;
1680*4882a593Smuzhiyun 	return 0;
1681*4882a593Smuzhiyun }
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun static void be_set_mc_list(struct be_adapter *adapter)
1684*4882a593Smuzhiyun {
1685*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
1686*4882a593Smuzhiyun 	struct netdev_hw_addr *ha;
1687*4882a593Smuzhiyun 	bool mc_promisc = false;
1688*4882a593Smuzhiyun 	int status;
1689*4882a593Smuzhiyun 
1690*4882a593Smuzhiyun 	netif_addr_lock_bh(netdev);
1691*4882a593Smuzhiyun 	__dev_mc_sync(netdev, be_mc_list_update, be_mc_list_update);
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun 	if (netdev->flags & IFF_PROMISC) {
1694*4882a593Smuzhiyun 		adapter->update_mc_list = false;
1695*4882a593Smuzhiyun 	} else if (netdev->flags & IFF_ALLMULTI ||
1696*4882a593Smuzhiyun 		   netdev_mc_count(netdev) > be_max_mc(adapter)) {
1697*4882a593Smuzhiyun 		/* Enable multicast promisc if the number of addresses
1698*4882a593Smuzhiyun 		 * configured exceeds what we support
1699*4882a593Smuzhiyun 		 */
1700*4882a593Smuzhiyun 		mc_promisc = true;
1701*4882a593Smuzhiyun 		adapter->update_mc_list = false;
1702*4882a593Smuzhiyun 	} else if (adapter->if_flags & BE_IF_FLAGS_MCAST_PROMISCUOUS) {
1703*4882a593Smuzhiyun 		/* Update mc-list unconditionally if the iface was previously
1704*4882a593Smuzhiyun 		 * in mc-promisc mode and now is out of that mode.
1705*4882a593Smuzhiyun 		 */
1706*4882a593Smuzhiyun 		adapter->update_mc_list = true;
1707*4882a593Smuzhiyun 	}
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 	if (adapter->update_mc_list) {
1710*4882a593Smuzhiyun 		int i = 0;
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun 		/* cache the mc-list in adapter */
1713*4882a593Smuzhiyun 		netdev_for_each_mc_addr(ha, netdev) {
1714*4882a593Smuzhiyun 			ether_addr_copy(adapter->mc_list[i].mac, ha->addr);
1715*4882a593Smuzhiyun 			i++;
1716*4882a593Smuzhiyun 		}
1717*4882a593Smuzhiyun 		adapter->mc_count = netdev_mc_count(netdev);
1718*4882a593Smuzhiyun 	}
1719*4882a593Smuzhiyun 	netif_addr_unlock_bh(netdev);
1720*4882a593Smuzhiyun 
1721*4882a593Smuzhiyun 	if (mc_promisc) {
1722*4882a593Smuzhiyun 		be_set_mc_promisc(adapter);
1723*4882a593Smuzhiyun 	} else if (adapter->update_mc_list) {
1724*4882a593Smuzhiyun 		status = be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, ON);
1725*4882a593Smuzhiyun 		if (!status)
1726*4882a593Smuzhiyun 			adapter->if_flags &= ~BE_IF_FLAGS_MCAST_PROMISCUOUS;
1727*4882a593Smuzhiyun 		else
1728*4882a593Smuzhiyun 			be_set_mc_promisc(adapter);
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun 		adapter->update_mc_list = false;
1731*4882a593Smuzhiyun 	}
1732*4882a593Smuzhiyun }
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun static void be_clear_mc_list(struct be_adapter *adapter)
1735*4882a593Smuzhiyun {
1736*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun 	__dev_mc_unsync(netdev, NULL);
1739*4882a593Smuzhiyun 	be_cmd_rx_filter(adapter, BE_IF_FLAGS_MULTICAST, OFF);
1740*4882a593Smuzhiyun 	adapter->mc_count = 0;
1741*4882a593Smuzhiyun }
1742*4882a593Smuzhiyun 
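/* Programs the uc-list entry at uc_idx as a pmac on the interface.
 * If the entry matches the primary MAC (dev_mac), the already programmed
 * pmac_id[0] is reused instead of adding a duplicate.
 */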
1743*4882a593Smuzhiyun static int be_uc_mac_add(struct be_adapter *adapter, int uc_idx)
1744*4882a593Smuzhiyun {
1745*4882a593Smuzhiyun 	if (ether_addr_equal(adapter->uc_list[uc_idx].mac, adapter->dev_mac)) {
1746*4882a593Smuzhiyun 		adapter->pmac_id[uc_idx + 1] = adapter->pmac_id[0];
1747*4882a593Smuzhiyun 		return 0;
1748*4882a593Smuzhiyun 	}
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun 	return be_cmd_pmac_add(adapter, adapter->uc_list[uc_idx].mac,
1751*4882a593Smuzhiyun 			       adapter->if_handle,
1752*4882a593Smuzhiyun 			       &adapter->pmac_id[uc_idx + 1], 0);
1753*4882a593Smuzhiyun }
1754*4882a593Smuzhiyun 
1755*4882a593Smuzhiyun static void be_uc_mac_del(struct be_adapter *adapter, int pmac_id)
1756*4882a593Smuzhiyun {
1757*4882a593Smuzhiyun 	if (pmac_id == adapter->pmac_id[0])
1758*4882a593Smuzhiyun 		return;
1759*4882a593Smuzhiyun 
1760*4882a593Smuzhiyun 	be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
1761*4882a593Smuzhiyun }
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun static void be_set_uc_list(struct be_adapter *adapter)
1764*4882a593Smuzhiyun {
1765*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
1766*4882a593Smuzhiyun 	struct netdev_hw_addr *ha;
1767*4882a593Smuzhiyun 	bool uc_promisc = false;
1768*4882a593Smuzhiyun 	int curr_uc_macs = 0, i;
1769*4882a593Smuzhiyun 
1770*4882a593Smuzhiyun 	netif_addr_lock_bh(netdev);
1771*4882a593Smuzhiyun 	__dev_uc_sync(netdev, be_uc_list_update, be_uc_list_update);
1772*4882a593Smuzhiyun 
1773*4882a593Smuzhiyun 	if (netdev->flags & IFF_PROMISC) {
1774*4882a593Smuzhiyun 		adapter->update_uc_list = false;
1775*4882a593Smuzhiyun 	} else if (netdev_uc_count(netdev) > (be_max_uc(adapter) - 1)) {
1776*4882a593Smuzhiyun 		uc_promisc = true;
1777*4882a593Smuzhiyun 		adapter->update_uc_list = false;
1778*4882a593Smuzhiyun 	}  else if (adapter->if_flags & BE_IF_FLAGS_PROMISCUOUS) {
1779*4882a593Smuzhiyun 		/* Update uc-list unconditionally if the iface was previously
1780*4882a593Smuzhiyun 		 * in uc-promisc mode and now is out of that mode.
1781*4882a593Smuzhiyun 		 */
1782*4882a593Smuzhiyun 		adapter->update_uc_list = true;
1783*4882a593Smuzhiyun 	}
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 	if (adapter->update_uc_list) {
1786*4882a593Smuzhiyun 		/* cache the uc-list in adapter array */
1787*4882a593Smuzhiyun 		i = 0;
1788*4882a593Smuzhiyun 		netdev_for_each_uc_addr(ha, netdev) {
1789*4882a593Smuzhiyun 			ether_addr_copy(adapter->uc_list[i].mac, ha->addr);
1790*4882a593Smuzhiyun 			i++;
1791*4882a593Smuzhiyun 		}
1792*4882a593Smuzhiyun 		curr_uc_macs = netdev_uc_count(netdev);
1793*4882a593Smuzhiyun 	}
1794*4882a593Smuzhiyun 	netif_addr_unlock_bh(netdev);
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 	if (uc_promisc) {
1797*4882a593Smuzhiyun 		be_set_uc_promisc(adapter);
1798*4882a593Smuzhiyun 	} else if (adapter->update_uc_list) {
1799*4882a593Smuzhiyun 		be_clear_uc_promisc(adapter);
1800*4882a593Smuzhiyun 
1801*4882a593Smuzhiyun 		for (i = 0; i < adapter->uc_macs; i++)
1802*4882a593Smuzhiyun 			be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1803*4882a593Smuzhiyun 
1804*4882a593Smuzhiyun 		for (i = 0; i < curr_uc_macs; i++)
1805*4882a593Smuzhiyun 			be_uc_mac_add(adapter, i);
1806*4882a593Smuzhiyun 		adapter->uc_macs = curr_uc_macs;
1807*4882a593Smuzhiyun 		adapter->update_uc_list = false;
1808*4882a593Smuzhiyun 	}
1809*4882a593Smuzhiyun }
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun static void be_clear_uc_list(struct be_adapter *adapter)
1812*4882a593Smuzhiyun {
1813*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
1814*4882a593Smuzhiyun 	int i;
1815*4882a593Smuzhiyun 
1816*4882a593Smuzhiyun 	__dev_uc_unsync(netdev, NULL);
1817*4882a593Smuzhiyun 	for (i = 0; i < adapter->uc_macs; i++)
1818*4882a593Smuzhiyun 		be_uc_mac_del(adapter, adapter->pmac_id[i + 1]);
1819*4882a593Smuzhiyun 
1820*4882a593Smuzhiyun 	adapter->uc_macs = 0;
1821*4882a593Smuzhiyun }
1822*4882a593Smuzhiyun 
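/* Applies the current netdev RX filter state to the adapter: enters or
 * leaves all-promiscuous mode based on IFF_PROMISC, re-programs the VLAN
 * list when leaving promiscuous mode, and then syncs the unicast and
 * multicast address lists.
 */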
1823*4882a593Smuzhiyun static void __be_set_rx_mode(struct be_adapter *adapter)
1824*4882a593Smuzhiyun {
1825*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
1826*4882a593Smuzhiyun 
1827*4882a593Smuzhiyun 	mutex_lock(&adapter->rx_filter_lock);
1828*4882a593Smuzhiyun 
1829*4882a593Smuzhiyun 	if (netdev->flags & IFF_PROMISC) {
1830*4882a593Smuzhiyun 		if (!be_in_all_promisc(adapter))
1831*4882a593Smuzhiyun 			be_set_all_promisc(adapter);
1832*4882a593Smuzhiyun 	} else if (be_in_all_promisc(adapter)) {
1833*4882a593Smuzhiyun 		/* We need to re-program the vlan-list or clear
1834*4882a593Smuzhiyun 		 * vlan-promisc mode (if needed) when the interface
1835*4882a593Smuzhiyun 		 * comes out of promisc mode.
1836*4882a593Smuzhiyun 		 */
1837*4882a593Smuzhiyun 		be_vid_config(adapter);
1838*4882a593Smuzhiyun 	}
1839*4882a593Smuzhiyun 
1840*4882a593Smuzhiyun 	be_set_uc_list(adapter);
1841*4882a593Smuzhiyun 	be_set_mc_list(adapter);
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun 	mutex_unlock(&adapter->rx_filter_lock);
1844*4882a593Smuzhiyun }
1845*4882a593Smuzhiyun 
1846*4882a593Smuzhiyun static void be_work_set_rx_mode(struct work_struct *work)
1847*4882a593Smuzhiyun {
1848*4882a593Smuzhiyun 	struct be_cmd_work *cmd_work =
1849*4882a593Smuzhiyun 				container_of(work, struct be_cmd_work, work);
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 	__be_set_rx_mode(cmd_work->adapter);
1852*4882a593Smuzhiyun 	kfree(cmd_work);
1853*4882a593Smuzhiyun }
1854*4882a593Smuzhiyun 
1855*4882a593Smuzhiyun static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1856*4882a593Smuzhiyun {
1857*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
1858*4882a593Smuzhiyun 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1859*4882a593Smuzhiyun 	int status;
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun 	if (!sriov_enabled(adapter))
1862*4882a593Smuzhiyun 		return -EPERM;
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1865*4882a593Smuzhiyun 		return -EINVAL;
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 	/* Proceed further only if user provided MAC is different
1868*4882a593Smuzhiyun 	 * from active MAC
1869*4882a593Smuzhiyun 	 */
1870*4882a593Smuzhiyun 	if (ether_addr_equal(mac, vf_cfg->mac_addr))
1871*4882a593Smuzhiyun 		return 0;
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun 	if (BEx_chip(adapter)) {
1874*4882a593Smuzhiyun 		be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1875*4882a593Smuzhiyun 				vf + 1);
1876*4882a593Smuzhiyun 
1877*4882a593Smuzhiyun 		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1878*4882a593Smuzhiyun 					 &vf_cfg->pmac_id, vf + 1);
1879*4882a593Smuzhiyun 	} else {
1880*4882a593Smuzhiyun 		status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1881*4882a593Smuzhiyun 					vf + 1);
1882*4882a593Smuzhiyun 	}
1883*4882a593Smuzhiyun 
1884*4882a593Smuzhiyun 	if (status) {
1885*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1886*4882a593Smuzhiyun 			mac, vf, status);
1887*4882a593Smuzhiyun 		return be_cmd_status(status);
1888*4882a593Smuzhiyun 	}
1889*4882a593Smuzhiyun 
1890*4882a593Smuzhiyun 	ether_addr_copy(vf_cfg->mac_addr, mac);
1891*4882a593Smuzhiyun 
1892*4882a593Smuzhiyun 	return 0;
1893*4882a593Smuzhiyun }
1894*4882a593Smuzhiyun 
1895*4882a593Smuzhiyun static int be_get_vf_config(struct net_device *netdev, int vf,
1896*4882a593Smuzhiyun 			    struct ifla_vf_info *vi)
1897*4882a593Smuzhiyun {
1898*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
1899*4882a593Smuzhiyun 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1900*4882a593Smuzhiyun 
1901*4882a593Smuzhiyun 	if (!sriov_enabled(adapter))
1902*4882a593Smuzhiyun 		return -EPERM;
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 	if (vf >= adapter->num_vfs)
1905*4882a593Smuzhiyun 		return -EINVAL;
1906*4882a593Smuzhiyun 
1907*4882a593Smuzhiyun 	vi->vf = vf;
1908*4882a593Smuzhiyun 	vi->max_tx_rate = vf_cfg->tx_rate;
1909*4882a593Smuzhiyun 	vi->min_tx_rate = 0;
1910*4882a593Smuzhiyun 	vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1911*4882a593Smuzhiyun 	vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1912*4882a593Smuzhiyun 	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1913*4882a593Smuzhiyun 	vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
1914*4882a593Smuzhiyun 	vi->spoofchk = adapter->vf_cfg[vf].spoofchk;
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun 	return 0;
1917*4882a593Smuzhiyun }
1918*4882a593Smuzhiyun 
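/* Enables Transparent VLAN Tagging (TVT) for the given VF: programs the
 * hsw config with the VLAN, clears any guest VLAN filters on the VF and
 * then revokes the VF's FILTMGMT privilege so it can no longer program
 * VLAN filters of its own.
 */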
1919*4882a593Smuzhiyun static int be_set_vf_tvt(struct be_adapter *adapter, int vf, u16 vlan)
1920*4882a593Smuzhiyun {
1921*4882a593Smuzhiyun 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1922*4882a593Smuzhiyun 	u16 vids[BE_NUM_VLANS_SUPPORTED];
1923*4882a593Smuzhiyun 	int vf_if_id = vf_cfg->if_handle;
1924*4882a593Smuzhiyun 	int status;
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun 	/* Enable Transparent VLAN Tagging */
1927*4882a593Smuzhiyun 	status = be_cmd_set_hsw_config(adapter, vlan, vf + 1, vf_if_id, 0, 0);
1928*4882a593Smuzhiyun 	if (status)
1929*4882a593Smuzhiyun 		return status;
1930*4882a593Smuzhiyun 
1931*4882a593Smuzhiyun 	/* Clear pre-programmed VLAN filters on VF if any, if TVT is enabled */
1932*4882a593Smuzhiyun 	vids[0] = 0;
1933*4882a593Smuzhiyun 	status = be_cmd_vlan_config(adapter, vf_if_id, vids, 1, vf + 1);
1934*4882a593Smuzhiyun 	if (!status)
1935*4882a593Smuzhiyun 		dev_info(&adapter->pdev->dev,
1936*4882a593Smuzhiyun 			 "Cleared guest VLANs on VF%d", vf);
1937*4882a593Smuzhiyun 
1938*4882a593Smuzhiyun 	/* After TVT is enabled, disallow VFs to program VLAN filters */
1939*4882a593Smuzhiyun 	if (vf_cfg->privileges & BE_PRIV_FILTMGMT) {
1940*4882a593Smuzhiyun 		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges &
1941*4882a593Smuzhiyun 						  ~BE_PRIV_FILTMGMT, vf + 1);
1942*4882a593Smuzhiyun 		if (!status)
1943*4882a593Smuzhiyun 			vf_cfg->privileges &= ~BE_PRIV_FILTMGMT;
1944*4882a593Smuzhiyun 	}
1945*4882a593Smuzhiyun 	return 0;
1946*4882a593Smuzhiyun }
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun static int be_clear_vf_tvt(struct be_adapter *adapter, int vf)
1949*4882a593Smuzhiyun {
1950*4882a593Smuzhiyun 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1951*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
1952*4882a593Smuzhiyun 	int status;
1953*4882a593Smuzhiyun 
1954*4882a593Smuzhiyun 	/* Reset Transparent VLAN Tagging. */
1955*4882a593Smuzhiyun 	status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID, vf + 1,
1956*4882a593Smuzhiyun 				       vf_cfg->if_handle, 0, 0);
1957*4882a593Smuzhiyun 	if (status)
1958*4882a593Smuzhiyun 		return status;
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun 	/* Allow VFs to program VLAN filtering */
1961*4882a593Smuzhiyun 	if (!(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
1962*4882a593Smuzhiyun 		status = be_cmd_set_fn_privileges(adapter, vf_cfg->privileges |
1963*4882a593Smuzhiyun 						  BE_PRIV_FILTMGMT, vf + 1);
1964*4882a593Smuzhiyun 		if (!status) {
1965*4882a593Smuzhiyun 			vf_cfg->privileges |= BE_PRIV_FILTMGMT;
1966*4882a593Smuzhiyun 			dev_info(dev, "VF%d: FILTMGMT priv enabled", vf);
1967*4882a593Smuzhiyun 		}
1968*4882a593Smuzhiyun 	}
1969*4882a593Smuzhiyun 
1970*4882a593Smuzhiyun 	dev_info(dev,
1971*4882a593Smuzhiyun 		 "Disable/re-enable i/f in VM to clear Transparent VLAN tag");
1972*4882a593Smuzhiyun 	return 0;
1973*4882a593Smuzhiyun }
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
1976*4882a593Smuzhiyun 			  __be16 vlan_proto)
1977*4882a593Smuzhiyun {
1978*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
1979*4882a593Smuzhiyun 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1980*4882a593Smuzhiyun 	int status;
1981*4882a593Smuzhiyun 
1982*4882a593Smuzhiyun 	if (!sriov_enabled(adapter))
1983*4882a593Smuzhiyun 		return -EPERM;
1984*4882a593Smuzhiyun 
1985*4882a593Smuzhiyun 	if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1986*4882a593Smuzhiyun 		return -EINVAL;
1987*4882a593Smuzhiyun 
1988*4882a593Smuzhiyun 	if (vlan_proto != htons(ETH_P_8021Q))
1989*4882a593Smuzhiyun 		return -EPROTONOSUPPORT;
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 	if (vlan || qos) {
1992*4882a593Smuzhiyun 		vlan |= qos << VLAN_PRIO_SHIFT;
1993*4882a593Smuzhiyun 		status = be_set_vf_tvt(adapter, vf, vlan);
1994*4882a593Smuzhiyun 	} else {
1995*4882a593Smuzhiyun 		status = be_clear_vf_tvt(adapter, vf);
1996*4882a593Smuzhiyun 	}
1997*4882a593Smuzhiyun 
1998*4882a593Smuzhiyun 	if (status) {
1999*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev,
2000*4882a593Smuzhiyun 			"VLAN %d config on VF %d failed : %#x\n", vlan, vf,
2001*4882a593Smuzhiyun 			status);
2002*4882a593Smuzhiyun 		return be_cmd_status(status);
2003*4882a593Smuzhiyun 	}
2004*4882a593Smuzhiyun 
2005*4882a593Smuzhiyun 	vf_cfg->vlan_tag = vlan;
2006*4882a593Smuzhiyun 	return 0;
2007*4882a593Smuzhiyun }
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
2010*4882a593Smuzhiyun 			     int min_tx_rate, int max_tx_rate)
2011*4882a593Smuzhiyun {
2012*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
2013*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
2014*4882a593Smuzhiyun 	int percent_rate, status = 0;
2015*4882a593Smuzhiyun 	u16 link_speed = 0;
2016*4882a593Smuzhiyun 	u8 link_status;
2017*4882a593Smuzhiyun 
2018*4882a593Smuzhiyun 	if (!sriov_enabled(adapter))
2019*4882a593Smuzhiyun 		return -EPERM;
2020*4882a593Smuzhiyun 
2021*4882a593Smuzhiyun 	if (vf >= adapter->num_vfs)
2022*4882a593Smuzhiyun 		return -EINVAL;
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun 	if (min_tx_rate)
2025*4882a593Smuzhiyun 		return -EINVAL;
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun 	if (!max_tx_rate)
2028*4882a593Smuzhiyun 		goto config_qos;
2029*4882a593Smuzhiyun 
2030*4882a593Smuzhiyun 	status = be_cmd_link_status_query(adapter, &link_speed,
2031*4882a593Smuzhiyun 					  &link_status, 0);
2032*4882a593Smuzhiyun 	if (status)
2033*4882a593Smuzhiyun 		goto err;
2034*4882a593Smuzhiyun 
2035*4882a593Smuzhiyun 	if (!link_status) {
2036*4882a593Smuzhiyun 		dev_err(dev, "TX-rate setting not allowed when link is down\n");
2037*4882a593Smuzhiyun 		status = -ENETDOWN;
2038*4882a593Smuzhiyun 		goto err;
2039*4882a593Smuzhiyun 	}
2040*4882a593Smuzhiyun 
2041*4882a593Smuzhiyun 	if (max_tx_rate < 100 || max_tx_rate > link_speed) {
2042*4882a593Smuzhiyun 		dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
2043*4882a593Smuzhiyun 			link_speed);
2044*4882a593Smuzhiyun 		status = -EINVAL;
2045*4882a593Smuzhiyun 		goto err;
2046*4882a593Smuzhiyun 	}
2047*4882a593Smuzhiyun 
2048*4882a593Smuzhiyun 	/* On Skyhawk the QOS setting must be a multiple of 1% of the link speed */
2049*4882a593Smuzhiyun 	percent_rate = link_speed / 100;
2050*4882a593Smuzhiyun 	if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
2051*4882a593Smuzhiyun 		dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
2052*4882a593Smuzhiyun 			percent_rate);
2053*4882a593Smuzhiyun 		status = -EINVAL;
2054*4882a593Smuzhiyun 		goto err;
2055*4882a593Smuzhiyun 	}
2056*4882a593Smuzhiyun 
2057*4882a593Smuzhiyun config_qos:
2058*4882a593Smuzhiyun 	status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
2059*4882a593Smuzhiyun 	if (status)
2060*4882a593Smuzhiyun 		goto err;
2061*4882a593Smuzhiyun 
2062*4882a593Smuzhiyun 	adapter->vf_cfg[vf].tx_rate = max_tx_rate;
2063*4882a593Smuzhiyun 	return 0;
2064*4882a593Smuzhiyun 
2065*4882a593Smuzhiyun err:
2066*4882a593Smuzhiyun 	dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
2067*4882a593Smuzhiyun 		max_tx_rate, vf);
2068*4882a593Smuzhiyun 	return be_cmd_status(status);
2069*4882a593Smuzhiyun }
2070*4882a593Smuzhiyun 
2071*4882a593Smuzhiyun static int be_set_vf_link_state(struct net_device *netdev, int vf,
2072*4882a593Smuzhiyun 				int link_state)
2073*4882a593Smuzhiyun {
2074*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
2075*4882a593Smuzhiyun 	int status;
2076*4882a593Smuzhiyun 
2077*4882a593Smuzhiyun 	if (!sriov_enabled(adapter))
2078*4882a593Smuzhiyun 		return -EPERM;
2079*4882a593Smuzhiyun 
2080*4882a593Smuzhiyun 	if (vf >= adapter->num_vfs)
2081*4882a593Smuzhiyun 		return -EINVAL;
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 	status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
2084*4882a593Smuzhiyun 	if (status) {
2085*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev,
2086*4882a593Smuzhiyun 			"Link state change on VF %d failed: %#x\n", vf, status);
2087*4882a593Smuzhiyun 		return be_cmd_status(status);
2088*4882a593Smuzhiyun 	}
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun 	adapter->vf_cfg[vf].plink_tracking = link_state;
2091*4882a593Smuzhiyun 
2092*4882a593Smuzhiyun 	return 0;
2093*4882a593Smuzhiyun }
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun static int be_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
2096*4882a593Smuzhiyun {
2097*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
2098*4882a593Smuzhiyun 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
2099*4882a593Smuzhiyun 	u8 spoofchk;
2100*4882a593Smuzhiyun 	int status;
2101*4882a593Smuzhiyun 
2102*4882a593Smuzhiyun 	if (!sriov_enabled(adapter))
2103*4882a593Smuzhiyun 		return -EPERM;
2104*4882a593Smuzhiyun 
2105*4882a593Smuzhiyun 	if (vf >= adapter->num_vfs)
2106*4882a593Smuzhiyun 		return -EINVAL;
2107*4882a593Smuzhiyun 
2108*4882a593Smuzhiyun 	if (BEx_chip(adapter))
2109*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun 	if (enable == vf_cfg->spoofchk)
2112*4882a593Smuzhiyun 		return 0;
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun 	spoofchk = enable ? ENABLE_MAC_SPOOFCHK : DISABLE_MAC_SPOOFCHK;
2115*4882a593Smuzhiyun 
2116*4882a593Smuzhiyun 	status = be_cmd_set_hsw_config(adapter, 0, vf + 1, vf_cfg->if_handle,
2117*4882a593Smuzhiyun 				       0, spoofchk);
2118*4882a593Smuzhiyun 	if (status) {
2119*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev,
2120*4882a593Smuzhiyun 			"Spoofchk change on VF %d failed: %#x\n", vf, status);
2121*4882a593Smuzhiyun 		return be_cmd_status(status);
2122*4882a593Smuzhiyun 	}
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun 	vf_cfg->spoofchk = enable;
2125*4882a593Smuzhiyun 	return 0;
2126*4882a593Smuzhiyun }
2127*4882a593Smuzhiyun 
2128*4882a593Smuzhiyun static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
2129*4882a593Smuzhiyun 			  ulong now)
2130*4882a593Smuzhiyun {
2131*4882a593Smuzhiyun 	aic->rx_pkts_prev = rx_pkts;
2132*4882a593Smuzhiyun 	aic->tx_reqs_prev = tx_pkts;
2133*4882a593Smuzhiyun 	aic->jiffies = now;
2134*4882a593Smuzhiyun }
2135*4882a593Smuzhiyun 
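/* Computes a new adaptive EQ delay for this event queue from the RX and TX
 * packet rates seen since the last sample. When adaptive coalescing is
 * disabled, the statically configured delay (et_eqd) is returned instead.
 */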
2136*4882a593Smuzhiyun static int be_get_new_eqd(struct be_eq_obj *eqo)
2137*4882a593Smuzhiyun {
2138*4882a593Smuzhiyun 	struct be_adapter *adapter = eqo->adapter;
2139*4882a593Smuzhiyun 	int eqd, start;
2140*4882a593Smuzhiyun 	struct be_aic_obj *aic;
2141*4882a593Smuzhiyun 	struct be_rx_obj *rxo;
2142*4882a593Smuzhiyun 	struct be_tx_obj *txo;
2143*4882a593Smuzhiyun 	u64 rx_pkts = 0, tx_pkts = 0;
2144*4882a593Smuzhiyun 	ulong now;
2145*4882a593Smuzhiyun 	u32 pps, delta;
2146*4882a593Smuzhiyun 	int i;
2147*4882a593Smuzhiyun 
2148*4882a593Smuzhiyun 	aic = &adapter->aic_obj[eqo->idx];
2149*4882a593Smuzhiyun 	if (!adapter->aic_enabled) {
2150*4882a593Smuzhiyun 		if (aic->jiffies)
2151*4882a593Smuzhiyun 			aic->jiffies = 0;
2152*4882a593Smuzhiyun 		eqd = aic->et_eqd;
2153*4882a593Smuzhiyun 		return eqd;
2154*4882a593Smuzhiyun 	}
2155*4882a593Smuzhiyun 
2156*4882a593Smuzhiyun 	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2157*4882a593Smuzhiyun 		do {
2158*4882a593Smuzhiyun 			start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
2159*4882a593Smuzhiyun 			rx_pkts += rxo->stats.rx_pkts;
2160*4882a593Smuzhiyun 		} while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
2161*4882a593Smuzhiyun 	}
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun 	for_all_tx_queues_on_eq(adapter, eqo, txo, i) {
2164*4882a593Smuzhiyun 		do {
2165*4882a593Smuzhiyun 			start = u64_stats_fetch_begin_irq(&txo->stats.sync);
2166*4882a593Smuzhiyun 			tx_pkts += txo->stats.tx_reqs;
2167*4882a593Smuzhiyun 		} while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
2168*4882a593Smuzhiyun 	}
2169*4882a593Smuzhiyun 
2170*4882a593Smuzhiyun 	/* Skip if the counters wrapped around or this is the first calculation */
2171*4882a593Smuzhiyun 	now = jiffies;
2172*4882a593Smuzhiyun 	if (!aic->jiffies || time_before(now, aic->jiffies) ||
2173*4882a593Smuzhiyun 	    rx_pkts < aic->rx_pkts_prev ||
2174*4882a593Smuzhiyun 	    tx_pkts < aic->tx_reqs_prev) {
2175*4882a593Smuzhiyun 		be_aic_update(aic, rx_pkts, tx_pkts, now);
2176*4882a593Smuzhiyun 		return aic->prev_eqd;
2177*4882a593Smuzhiyun 	}
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun 	delta = jiffies_to_msecs(now - aic->jiffies);
2180*4882a593Smuzhiyun 	if (delta == 0)
2181*4882a593Smuzhiyun 		return aic->prev_eqd;
2182*4882a593Smuzhiyun 
2183*4882a593Smuzhiyun 	pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
2184*4882a593Smuzhiyun 		(((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
2185*4882a593Smuzhiyun 	eqd = (pps / 15000) << 2;
2186*4882a593Smuzhiyun 
2187*4882a593Smuzhiyun 	if (eqd < 8)
2188*4882a593Smuzhiyun 		eqd = 0;
2189*4882a593Smuzhiyun 	eqd = min_t(u32, eqd, aic->max_eqd);
2190*4882a593Smuzhiyun 	eqd = max_t(u32, eqd, aic->min_eqd);
2191*4882a593Smuzhiyun 
2192*4882a593Smuzhiyun 	be_aic_update(aic, rx_pkts, tx_pkts, now);
2193*4882a593Smuzhiyun 
2194*4882a593Smuzhiyun 	return eqd;
2195*4882a593Smuzhiyun }
2196*4882a593Smuzhiyun 
2197*4882a593Smuzhiyun /* For Skyhawk-R only */
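/* On Skyhawk-R the EQ doorbell takes a delay-multiplier encoding instead of an
 * absolute delay, so the eqd computed by be_get_new_eqd() is only bucketed
 * here: eqd > 100 -> R2I_DLY_ENC_1, > 60 -> R2I_DLY_ENC_2, > 20 ->
 * R2I_DLY_ENC_3, otherwise R2I_DLY_ENC_0 (no delay). The hardware meaning of
 * each encoding is not defined in this file; this is simply the mapping
 * applied below.
 */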
2198*4882a593Smuzhiyun static u32 be_get_eq_delay_mult_enc(struct be_eq_obj *eqo)
2199*4882a593Smuzhiyun {
2200*4882a593Smuzhiyun 	struct be_adapter *adapter = eqo->adapter;
2201*4882a593Smuzhiyun 	struct be_aic_obj *aic = &adapter->aic_obj[eqo->idx];
2202*4882a593Smuzhiyun 	ulong now = jiffies;
2203*4882a593Smuzhiyun 	int eqd;
2204*4882a593Smuzhiyun 	u32 mult_enc;
2205*4882a593Smuzhiyun 
2206*4882a593Smuzhiyun 	if (!adapter->aic_enabled)
2207*4882a593Smuzhiyun 		return 0;
2208*4882a593Smuzhiyun 
2209*4882a593Smuzhiyun 	if (jiffies_to_msecs(now - aic->jiffies) < 1)
2210*4882a593Smuzhiyun 		eqd = aic->prev_eqd;
2211*4882a593Smuzhiyun 	else
2212*4882a593Smuzhiyun 		eqd = be_get_new_eqd(eqo);
2213*4882a593Smuzhiyun 
2214*4882a593Smuzhiyun 	if (eqd > 100)
2215*4882a593Smuzhiyun 		mult_enc = R2I_DLY_ENC_1;
2216*4882a593Smuzhiyun 	else if (eqd > 60)
2217*4882a593Smuzhiyun 		mult_enc = R2I_DLY_ENC_2;
2218*4882a593Smuzhiyun 	else if (eqd > 20)
2219*4882a593Smuzhiyun 		mult_enc = R2I_DLY_ENC_3;
2220*4882a593Smuzhiyun 	else
2221*4882a593Smuzhiyun 		mult_enc = R2I_DLY_ENC_0;
2222*4882a593Smuzhiyun 
2223*4882a593Smuzhiyun 	aic->prev_eqd = eqd;
2224*4882a593Smuzhiyun 
2225*4882a593Smuzhiyun 	return mult_enc;
2226*4882a593Smuzhiyun }
2227*4882a593Smuzhiyun 
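/* Pushes a new delay to every EQ whose computed eqd has changed. The firmware
 * command takes a "delay multiplier" rather than usecs; the (eqd * 65)/100
 * scaling below appears to be that conversion (e.g. eqd = 80 becomes a
 * multiplier of 52). The exact firmware units are not spelled out in this
 * file.
 */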
2228*4882a593Smuzhiyun void be_eqd_update(struct be_adapter *adapter, bool force_update)
2229*4882a593Smuzhiyun {
2230*4882a593Smuzhiyun 	struct be_set_eqd set_eqd[MAX_EVT_QS];
2231*4882a593Smuzhiyun 	struct be_aic_obj *aic;
2232*4882a593Smuzhiyun 	struct be_eq_obj *eqo;
2233*4882a593Smuzhiyun 	int i, num = 0, eqd;
2234*4882a593Smuzhiyun 
2235*4882a593Smuzhiyun 	for_all_evt_queues(adapter, eqo, i) {
2236*4882a593Smuzhiyun 		aic = &adapter->aic_obj[eqo->idx];
2237*4882a593Smuzhiyun 		eqd = be_get_new_eqd(eqo);
2238*4882a593Smuzhiyun 		if (force_update || eqd != aic->prev_eqd) {
2239*4882a593Smuzhiyun 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
2240*4882a593Smuzhiyun 			set_eqd[num].eq_id = eqo->q.id;
2241*4882a593Smuzhiyun 			aic->prev_eqd = eqd;
2242*4882a593Smuzhiyun 			num++;
2243*4882a593Smuzhiyun 		}
2244*4882a593Smuzhiyun 	}
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun 	if (num)
2247*4882a593Smuzhiyun 		be_cmd_modify_eqd(adapter, set_eqd, num);
2248*4882a593Smuzhiyun }
2249*4882a593Smuzhiyun 
2250*4882a593Smuzhiyun static void be_rx_stats_update(struct be_rx_obj *rxo,
2251*4882a593Smuzhiyun 			       struct be_rx_compl_info *rxcp)
2252*4882a593Smuzhiyun {
2253*4882a593Smuzhiyun 	struct be_rx_stats *stats = rx_stats(rxo);
2254*4882a593Smuzhiyun 
2255*4882a593Smuzhiyun 	u64_stats_update_begin(&stats->sync);
2256*4882a593Smuzhiyun 	stats->rx_compl++;
2257*4882a593Smuzhiyun 	stats->rx_bytes += rxcp->pkt_size;
2258*4882a593Smuzhiyun 	stats->rx_pkts++;
2259*4882a593Smuzhiyun 	if (rxcp->tunneled)
2260*4882a593Smuzhiyun 		stats->rx_vxlan_offload_pkts++;
2261*4882a593Smuzhiyun 	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
2262*4882a593Smuzhiyun 		stats->rx_mcast_pkts++;
2263*4882a593Smuzhiyun 	if (rxcp->err)
2264*4882a593Smuzhiyun 		stats->rx_compl_err++;
2265*4882a593Smuzhiyun 	u64_stats_update_end(&stats->sync);
2266*4882a593Smuzhiyun }
2267*4882a593Smuzhiyun 
2268*4882a593Smuzhiyun static inline bool csum_passed(struct be_rx_compl_info *rxcp)
2269*4882a593Smuzhiyun {
2270*4882a593Smuzhiyun 	/* L4 checksum is not reliable for non TCP/UDP packets.
2271*4882a593Smuzhiyun 	 * Also ignore ipcksm for ipv6 pkts
2272*4882a593Smuzhiyun 	 */
2273*4882a593Smuzhiyun 	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
2274*4882a593Smuzhiyun 		(rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
2275*4882a593Smuzhiyun }
2276*4882a593Smuzhiyun 
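/* RX buffers are carved out of a single DMA-mapped "big page". Only the
 * fragment that was flagged last_frag when the buffers were posted owns the
 * mapping for the whole page, so it alone is unmapped here; all other
 * fragments are merely synced for CPU access.
 */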
2277*4882a593Smuzhiyun static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
2278*4882a593Smuzhiyun {
2279*4882a593Smuzhiyun 	struct be_adapter *adapter = rxo->adapter;
2280*4882a593Smuzhiyun 	struct be_rx_page_info *rx_page_info;
2281*4882a593Smuzhiyun 	struct be_queue_info *rxq = &rxo->q;
2282*4882a593Smuzhiyun 	u32 frag_idx = rxq->tail;
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun 	rx_page_info = &rxo->page_info_tbl[frag_idx];
2285*4882a593Smuzhiyun 	BUG_ON(!rx_page_info->page);
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	if (rx_page_info->last_frag) {
2288*4882a593Smuzhiyun 		dma_unmap_page(&adapter->pdev->dev,
2289*4882a593Smuzhiyun 			       dma_unmap_addr(rx_page_info, bus),
2290*4882a593Smuzhiyun 			       adapter->big_page_size, DMA_FROM_DEVICE);
2291*4882a593Smuzhiyun 		rx_page_info->last_frag = false;
2292*4882a593Smuzhiyun 	} else {
2293*4882a593Smuzhiyun 		dma_sync_single_for_cpu(&adapter->pdev->dev,
2294*4882a593Smuzhiyun 					dma_unmap_addr(rx_page_info, bus),
2295*4882a593Smuzhiyun 					rx_frag_size, DMA_FROM_DEVICE);
2296*4882a593Smuzhiyun 	}
2297*4882a593Smuzhiyun 
2298*4882a593Smuzhiyun 	queue_tail_inc(rxq);
2299*4882a593Smuzhiyun 	atomic_dec(&rxq->used);
2300*4882a593Smuzhiyun 	return rx_page_info;
2301*4882a593Smuzhiyun }
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun /* Throw away the data in the Rx completion */
2304*4882a593Smuzhiyun static void be_rx_compl_discard(struct be_rx_obj *rxo,
2305*4882a593Smuzhiyun 				struct be_rx_compl_info *rxcp)
2306*4882a593Smuzhiyun {
2307*4882a593Smuzhiyun 	struct be_rx_page_info *page_info;
2308*4882a593Smuzhiyun 	u16 i, num_rcvd = rxcp->num_rcvd;
2309*4882a593Smuzhiyun 
2310*4882a593Smuzhiyun 	for (i = 0; i < num_rcvd; i++) {
2311*4882a593Smuzhiyun 		page_info = get_rx_page_info(rxo);
2312*4882a593Smuzhiyun 		put_page(page_info->page);
2313*4882a593Smuzhiyun 		memset(page_info, 0, sizeof(*page_info));
2314*4882a593Smuzhiyun 	}
2315*4882a593Smuzhiyun }
2316*4882a593Smuzhiyun 
2317*4882a593Smuzhiyun /*
2318*4882a593Smuzhiyun  * skb_fill_rx_data forms a complete skb for an ether frame
2319*4882a593Smuzhiyun  * indicated by rxcp.
2320*4882a593Smuzhiyun  */
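/* Packets no larger than BE_HDR_LEN are copied entirely into the skb linear
 * area. For larger packets only the Ethernet header is copied; the rest of
 * the first fragment, and any further fragments, are attached as page frags,
 * with consecutive fragments that share a physical page coalesced into one
 * frag slot.
 */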
2321*4882a593Smuzhiyun static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
2322*4882a593Smuzhiyun 			     struct be_rx_compl_info *rxcp)
2323*4882a593Smuzhiyun {
2324*4882a593Smuzhiyun 	struct be_rx_page_info *page_info;
2325*4882a593Smuzhiyun 	u16 i, j;
2326*4882a593Smuzhiyun 	u16 hdr_len, curr_frag_len, remaining;
2327*4882a593Smuzhiyun 	u8 *start;
2328*4882a593Smuzhiyun 
2329*4882a593Smuzhiyun 	page_info = get_rx_page_info(rxo);
2330*4882a593Smuzhiyun 	start = page_address(page_info->page) + page_info->page_offset;
2331*4882a593Smuzhiyun 	prefetch(start);
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun 	/* Copy data in the first descriptor of this completion */
2334*4882a593Smuzhiyun 	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
2335*4882a593Smuzhiyun 
2336*4882a593Smuzhiyun 	skb->len = curr_frag_len;
2337*4882a593Smuzhiyun 	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
2338*4882a593Smuzhiyun 		memcpy(skb->data, start, curr_frag_len);
2339*4882a593Smuzhiyun 		/* Complete packet has now been moved to data */
2340*4882a593Smuzhiyun 		put_page(page_info->page);
2341*4882a593Smuzhiyun 		skb->data_len = 0;
2342*4882a593Smuzhiyun 		skb->tail += curr_frag_len;
2343*4882a593Smuzhiyun 	} else {
2344*4882a593Smuzhiyun 		hdr_len = ETH_HLEN;
2345*4882a593Smuzhiyun 		memcpy(skb->data, start, hdr_len);
2346*4882a593Smuzhiyun 		skb_shinfo(skb)->nr_frags = 1;
2347*4882a593Smuzhiyun 		skb_frag_set_page(skb, 0, page_info->page);
2348*4882a593Smuzhiyun 		skb_frag_off_set(&skb_shinfo(skb)->frags[0],
2349*4882a593Smuzhiyun 				 page_info->page_offset + hdr_len);
2350*4882a593Smuzhiyun 		skb_frag_size_set(&skb_shinfo(skb)->frags[0],
2351*4882a593Smuzhiyun 				  curr_frag_len - hdr_len);
2352*4882a593Smuzhiyun 		skb->data_len = curr_frag_len - hdr_len;
2353*4882a593Smuzhiyun 		skb->truesize += rx_frag_size;
2354*4882a593Smuzhiyun 		skb->tail += hdr_len;
2355*4882a593Smuzhiyun 	}
2356*4882a593Smuzhiyun 	page_info->page = NULL;
2357*4882a593Smuzhiyun 
2358*4882a593Smuzhiyun 	if (rxcp->pkt_size <= rx_frag_size) {
2359*4882a593Smuzhiyun 		BUG_ON(rxcp->num_rcvd != 1);
2360*4882a593Smuzhiyun 		return;
2361*4882a593Smuzhiyun 	}
2362*4882a593Smuzhiyun 
2363*4882a593Smuzhiyun 	/* More frags present for this completion */
2364*4882a593Smuzhiyun 	remaining = rxcp->pkt_size - curr_frag_len;
2365*4882a593Smuzhiyun 	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
2366*4882a593Smuzhiyun 		page_info = get_rx_page_info(rxo);
2367*4882a593Smuzhiyun 		curr_frag_len = min(remaining, rx_frag_size);
2368*4882a593Smuzhiyun 
2369*4882a593Smuzhiyun 		/* Coalesce all frags from the same physical page in one slot */
2370*4882a593Smuzhiyun 		if (page_info->page_offset == 0) {
2371*4882a593Smuzhiyun 			/* Fresh page */
2372*4882a593Smuzhiyun 			j++;
2373*4882a593Smuzhiyun 			skb_frag_set_page(skb, j, page_info->page);
2374*4882a593Smuzhiyun 			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
2375*4882a593Smuzhiyun 					 page_info->page_offset);
2376*4882a593Smuzhiyun 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2377*4882a593Smuzhiyun 			skb_shinfo(skb)->nr_frags++;
2378*4882a593Smuzhiyun 		} else {
2379*4882a593Smuzhiyun 			put_page(page_info->page);
2380*4882a593Smuzhiyun 		}
2381*4882a593Smuzhiyun 
2382*4882a593Smuzhiyun 		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
2383*4882a593Smuzhiyun 		skb->len += curr_frag_len;
2384*4882a593Smuzhiyun 		skb->data_len += curr_frag_len;
2385*4882a593Smuzhiyun 		skb->truesize += rx_frag_size;
2386*4882a593Smuzhiyun 		remaining -= curr_frag_len;
2387*4882a593Smuzhiyun 		page_info->page = NULL;
2388*4882a593Smuzhiyun 	}
2389*4882a593Smuzhiyun 	BUG_ON(j > MAX_SKB_FRAGS);
2390*4882a593Smuzhiyun }
2391*4882a593Smuzhiyun 
2392*4882a593Smuzhiyun /* Process the RX completion indicated by rxcp when GRO is disabled */
2393*4882a593Smuzhiyun static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
2394*4882a593Smuzhiyun 				struct be_rx_compl_info *rxcp)
2395*4882a593Smuzhiyun {
2396*4882a593Smuzhiyun 	struct be_adapter *adapter = rxo->adapter;
2397*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
2398*4882a593Smuzhiyun 	struct sk_buff *skb;
2399*4882a593Smuzhiyun 
2400*4882a593Smuzhiyun 	skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
2401*4882a593Smuzhiyun 	if (unlikely(!skb)) {
2402*4882a593Smuzhiyun 		rx_stats(rxo)->rx_drops_no_skbs++;
2403*4882a593Smuzhiyun 		be_rx_compl_discard(rxo, rxcp);
2404*4882a593Smuzhiyun 		return;
2405*4882a593Smuzhiyun 	}
2406*4882a593Smuzhiyun 
2407*4882a593Smuzhiyun 	skb_fill_rx_data(rxo, skb, rxcp);
2408*4882a593Smuzhiyun 
2409*4882a593Smuzhiyun 	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
2410*4882a593Smuzhiyun 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2411*4882a593Smuzhiyun 	else
2412*4882a593Smuzhiyun 		skb_checksum_none_assert(skb);
2413*4882a593Smuzhiyun 
2414*4882a593Smuzhiyun 	skb->protocol = eth_type_trans(skb, netdev);
2415*4882a593Smuzhiyun 	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
2416*4882a593Smuzhiyun 	if (netdev->features & NETIF_F_RXHASH)
2417*4882a593Smuzhiyun 		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
2418*4882a593Smuzhiyun 
2419*4882a593Smuzhiyun 	skb->csum_level = rxcp->tunneled;
2420*4882a593Smuzhiyun 	skb_mark_napi_id(skb, napi);
2421*4882a593Smuzhiyun 
2422*4882a593Smuzhiyun 	if (rxcp->vlanf)
2423*4882a593Smuzhiyun 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2424*4882a593Smuzhiyun 
2425*4882a593Smuzhiyun 	netif_receive_skb(skb);
2426*4882a593Smuzhiyun }
2427*4882a593Smuzhiyun 
2428*4882a593Smuzhiyun /* Process the RX completion indicated by rxcp when GRO is enabled */
2429*4882a593Smuzhiyun static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
2430*4882a593Smuzhiyun 				    struct napi_struct *napi,
2431*4882a593Smuzhiyun 				    struct be_rx_compl_info *rxcp)
2432*4882a593Smuzhiyun {
2433*4882a593Smuzhiyun 	struct be_adapter *adapter = rxo->adapter;
2434*4882a593Smuzhiyun 	struct be_rx_page_info *page_info;
2435*4882a593Smuzhiyun 	struct sk_buff *skb = NULL;
2436*4882a593Smuzhiyun 	u16 remaining, curr_frag_len;
2437*4882a593Smuzhiyun 	u16 i, j;
2438*4882a593Smuzhiyun 
2439*4882a593Smuzhiyun 	skb = napi_get_frags(napi);
2440*4882a593Smuzhiyun 	if (!skb) {
2441*4882a593Smuzhiyun 		be_rx_compl_discard(rxo, rxcp);
2442*4882a593Smuzhiyun 		return;
2443*4882a593Smuzhiyun 	}
2444*4882a593Smuzhiyun 
2445*4882a593Smuzhiyun 	remaining = rxcp->pkt_size;
2446*4882a593Smuzhiyun 	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
2447*4882a593Smuzhiyun 		page_info = get_rx_page_info(rxo);
2448*4882a593Smuzhiyun 
2449*4882a593Smuzhiyun 		curr_frag_len = min(remaining, rx_frag_size);
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 		/* Coalesce all frags from the same physical page in one slot */
2452*4882a593Smuzhiyun 		if (i == 0 || page_info->page_offset == 0) {
2453*4882a593Smuzhiyun 			/* First frag or Fresh page */
2454*4882a593Smuzhiyun 			j++;
2455*4882a593Smuzhiyun 			skb_frag_set_page(skb, j, page_info->page);
2456*4882a593Smuzhiyun 			skb_frag_off_set(&skb_shinfo(skb)->frags[j],
2457*4882a593Smuzhiyun 					 page_info->page_offset);
2458*4882a593Smuzhiyun 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
2459*4882a593Smuzhiyun 		} else {
2460*4882a593Smuzhiyun 			put_page(page_info->page);
2461*4882a593Smuzhiyun 		}
2462*4882a593Smuzhiyun 		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
2463*4882a593Smuzhiyun 		skb->truesize += rx_frag_size;
2464*4882a593Smuzhiyun 		remaining -= curr_frag_len;
2465*4882a593Smuzhiyun 		memset(page_info, 0, sizeof(*page_info));
2466*4882a593Smuzhiyun 	}
2467*4882a593Smuzhiyun 	BUG_ON(j > MAX_SKB_FRAGS);
2468*4882a593Smuzhiyun 
2469*4882a593Smuzhiyun 	skb_shinfo(skb)->nr_frags = j + 1;
2470*4882a593Smuzhiyun 	skb->len = rxcp->pkt_size;
2471*4882a593Smuzhiyun 	skb->data_len = rxcp->pkt_size;
2472*4882a593Smuzhiyun 	skb->ip_summed = CHECKSUM_UNNECESSARY;
2473*4882a593Smuzhiyun 	skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
2474*4882a593Smuzhiyun 	if (adapter->netdev->features & NETIF_F_RXHASH)
2475*4882a593Smuzhiyun 		skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun 	skb->csum_level = rxcp->tunneled;
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun 	if (rxcp->vlanf)
2480*4882a593Smuzhiyun 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
2481*4882a593Smuzhiyun 
2482*4882a593Smuzhiyun 	napi_gro_frags(napi);
2483*4882a593Smuzhiyun }
2484*4882a593Smuzhiyun 
2485*4882a593Smuzhiyun static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
2486*4882a593Smuzhiyun 				 struct be_rx_compl_info *rxcp)
2487*4882a593Smuzhiyun {
2488*4882a593Smuzhiyun 	rxcp->pkt_size = GET_RX_COMPL_V1_BITS(pktsize, compl);
2489*4882a593Smuzhiyun 	rxcp->vlanf = GET_RX_COMPL_V1_BITS(vtp, compl);
2490*4882a593Smuzhiyun 	rxcp->err = GET_RX_COMPL_V1_BITS(err, compl);
2491*4882a593Smuzhiyun 	rxcp->tcpf = GET_RX_COMPL_V1_BITS(tcpf, compl);
2492*4882a593Smuzhiyun 	rxcp->udpf = GET_RX_COMPL_V1_BITS(udpf, compl);
2493*4882a593Smuzhiyun 	rxcp->ip_csum = GET_RX_COMPL_V1_BITS(ipcksm, compl);
2494*4882a593Smuzhiyun 	rxcp->l4_csum = GET_RX_COMPL_V1_BITS(l4_cksm, compl);
2495*4882a593Smuzhiyun 	rxcp->ipv6 = GET_RX_COMPL_V1_BITS(ip_version, compl);
2496*4882a593Smuzhiyun 	rxcp->num_rcvd = GET_RX_COMPL_V1_BITS(numfrags, compl);
2497*4882a593Smuzhiyun 	rxcp->pkt_type = GET_RX_COMPL_V1_BITS(cast_enc, compl);
2498*4882a593Smuzhiyun 	rxcp->rss_hash = GET_RX_COMPL_V1_BITS(rsshash, compl);
2499*4882a593Smuzhiyun 	if (rxcp->vlanf) {
2500*4882a593Smuzhiyun 		rxcp->qnq = GET_RX_COMPL_V1_BITS(qnq, compl);
2501*4882a593Smuzhiyun 		rxcp->vlan_tag = GET_RX_COMPL_V1_BITS(vlan_tag, compl);
2502*4882a593Smuzhiyun 	}
2503*4882a593Smuzhiyun 	rxcp->port = GET_RX_COMPL_V1_BITS(port, compl);
2504*4882a593Smuzhiyun 	rxcp->tunneled =
2505*4882a593Smuzhiyun 		GET_RX_COMPL_V1_BITS(tunneled, compl);
2506*4882a593Smuzhiyun }
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
2509*4882a593Smuzhiyun 				 struct be_rx_compl_info *rxcp)
2510*4882a593Smuzhiyun {
2511*4882a593Smuzhiyun 	rxcp->pkt_size = GET_RX_COMPL_V0_BITS(pktsize, compl);
2512*4882a593Smuzhiyun 	rxcp->vlanf = GET_RX_COMPL_V0_BITS(vtp, compl);
2513*4882a593Smuzhiyun 	rxcp->err = GET_RX_COMPL_V0_BITS(err, compl);
2514*4882a593Smuzhiyun 	rxcp->tcpf = GET_RX_COMPL_V0_BITS(tcpf, compl);
2515*4882a593Smuzhiyun 	rxcp->udpf = GET_RX_COMPL_V0_BITS(udpf, compl);
2516*4882a593Smuzhiyun 	rxcp->ip_csum = GET_RX_COMPL_V0_BITS(ipcksm, compl);
2517*4882a593Smuzhiyun 	rxcp->l4_csum = GET_RX_COMPL_V0_BITS(l4_cksm, compl);
2518*4882a593Smuzhiyun 	rxcp->ipv6 = GET_RX_COMPL_V0_BITS(ip_version, compl);
2519*4882a593Smuzhiyun 	rxcp->num_rcvd = GET_RX_COMPL_V0_BITS(numfrags, compl);
2520*4882a593Smuzhiyun 	rxcp->pkt_type = GET_RX_COMPL_V0_BITS(cast_enc, compl);
2521*4882a593Smuzhiyun 	rxcp->rss_hash = GET_RX_COMPL_V0_BITS(rsshash, compl);
2522*4882a593Smuzhiyun 	if (rxcp->vlanf) {
2523*4882a593Smuzhiyun 		rxcp->qnq = GET_RX_COMPL_V0_BITS(qnq, compl);
2524*4882a593Smuzhiyun 		rxcp->vlan_tag = GET_RX_COMPL_V0_BITS(vlan_tag, compl);
2525*4882a593Smuzhiyun 	}
2526*4882a593Smuzhiyun 	rxcp->port = GET_RX_COMPL_V0_BITS(port, compl);
2527*4882a593Smuzhiyun 	rxcp->ip_frag = GET_RX_COMPL_V0_BITS(ip_frag, compl);
2528*4882a593Smuzhiyun }
2529*4882a593Smuzhiyun 
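/* A completion is consumed only once HW has set its valid bit; the rmb()
 * below orders the valid-bit read against the reads of the remaining dwords.
 * The entry is parsed using the v1 layout on be3_native adapters and the v0
 * layout otherwise, the valid bit is then cleared so the entry is not seen
 * again, and the CQ tail is advanced.
 */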
2530*4882a593Smuzhiyun static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
2531*4882a593Smuzhiyun {
2532*4882a593Smuzhiyun 	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
2533*4882a593Smuzhiyun 	struct be_rx_compl_info *rxcp = &rxo->rxcp;
2534*4882a593Smuzhiyun 	struct be_adapter *adapter = rxo->adapter;
2535*4882a593Smuzhiyun 
2536*4882a593Smuzhiyun 	/* For checking the valid bit it is Ok to use either definition as the
2537*4882a593Smuzhiyun 	 * valid bit is at the same position in both v0 and v1 Rx compl */
2538*4882a593Smuzhiyun 	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
2539*4882a593Smuzhiyun 		return NULL;
2540*4882a593Smuzhiyun 
2541*4882a593Smuzhiyun 	rmb();
2542*4882a593Smuzhiyun 	be_dws_le_to_cpu(compl, sizeof(*compl));
2543*4882a593Smuzhiyun 
2544*4882a593Smuzhiyun 	if (adapter->be3_native)
2545*4882a593Smuzhiyun 		be_parse_rx_compl_v1(compl, rxcp);
2546*4882a593Smuzhiyun 	else
2547*4882a593Smuzhiyun 		be_parse_rx_compl_v0(compl, rxcp);
2548*4882a593Smuzhiyun 
2549*4882a593Smuzhiyun 	if (rxcp->ip_frag)
2550*4882a593Smuzhiyun 		rxcp->l4_csum = 0;
2551*4882a593Smuzhiyun 
2552*4882a593Smuzhiyun 	if (rxcp->vlanf) {
2553*4882a593Smuzhiyun 		/* In QNQ modes, if qnq bit is not set, then the packet was
2554*4882a593Smuzhiyun 		 * tagged only with the transparent outer vlan-tag and must
2555*4882a593Smuzhiyun 		 * not be treated as a vlan packet by host
2556*4882a593Smuzhiyun 		 */
2557*4882a593Smuzhiyun 		if (be_is_qnq_mode(adapter) && !rxcp->qnq)
2558*4882a593Smuzhiyun 			rxcp->vlanf = 0;
2559*4882a593Smuzhiyun 
2560*4882a593Smuzhiyun 		if (!lancer_chip(adapter))
2561*4882a593Smuzhiyun 			rxcp->vlan_tag = swab16(rxcp->vlan_tag);
2562*4882a593Smuzhiyun 
2563*4882a593Smuzhiyun 		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
2564*4882a593Smuzhiyun 		    !test_bit(rxcp->vlan_tag, adapter->vids))
2565*4882a593Smuzhiyun 			rxcp->vlanf = 0;
2566*4882a593Smuzhiyun 	}
2567*4882a593Smuzhiyun 
2568*4882a593Smuzhiyun 	/* As the compl has been parsed, reset it; we won't touch it again */
2569*4882a593Smuzhiyun 	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
2570*4882a593Smuzhiyun 
2571*4882a593Smuzhiyun 	queue_tail_inc(&rxo->cq);
2572*4882a593Smuzhiyun 	return rxcp;
2573*4882a593Smuzhiyun }
2574*4882a593Smuzhiyun 
2575*4882a593Smuzhiyun static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
2576*4882a593Smuzhiyun {
2577*4882a593Smuzhiyun 	u32 order = get_order(size);
2578*4882a593Smuzhiyun 
2579*4882a593Smuzhiyun 	if (order > 0)
2580*4882a593Smuzhiyun 		gfp |= __GFP_COMP;
2581*4882a593Smuzhiyun 	return  alloc_pages(gfp, order);
2582*4882a593Smuzhiyun }
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun /*
2585*4882a593Smuzhiyun  * Allocate a page, split it to fragments of size rx_frag_size and post as
2586*4882a593Smuzhiyun  * Allocate a page, split it into fragments of size rx_frag_size and post them
2587*4882a593Smuzhiyun  * as receive buffers to BE
 */
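/* Illustrative numbers only: with the default rx_frag_size of 2048 and 4K
 * pages, big_page_size works out to one page, i.e. two fragments per DMA
 * mapping; larger rx_frag_size values lead to higher-order allocations.
 */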
2588*4882a593Smuzhiyun static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp, u32 frags_needed)
2589*4882a593Smuzhiyun {
2590*4882a593Smuzhiyun 	struct be_adapter *adapter = rxo->adapter;
2591*4882a593Smuzhiyun 	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
2592*4882a593Smuzhiyun 	struct be_queue_info *rxq = &rxo->q;
2593*4882a593Smuzhiyun 	struct page *pagep = NULL;
2594*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
2595*4882a593Smuzhiyun 	struct be_eth_rx_d *rxd;
2596*4882a593Smuzhiyun 	u64 page_dmaaddr = 0, frag_dmaaddr;
2597*4882a593Smuzhiyun 	u32 posted, page_offset = 0, notify = 0;
2598*4882a593Smuzhiyun 
2599*4882a593Smuzhiyun 	page_info = &rxo->page_info_tbl[rxq->head];
2600*4882a593Smuzhiyun 	for (posted = 0; posted < frags_needed && !page_info->page; posted++) {
2601*4882a593Smuzhiyun 		if (!pagep) {
2602*4882a593Smuzhiyun 			pagep = be_alloc_pages(adapter->big_page_size, gfp);
2603*4882a593Smuzhiyun 			if (unlikely(!pagep)) {
2604*4882a593Smuzhiyun 				rx_stats(rxo)->rx_post_fail++;
2605*4882a593Smuzhiyun 				break;
2606*4882a593Smuzhiyun 			}
2607*4882a593Smuzhiyun 			page_dmaaddr = dma_map_page(dev, pagep, 0,
2608*4882a593Smuzhiyun 						    adapter->big_page_size,
2609*4882a593Smuzhiyun 						    DMA_FROM_DEVICE);
2610*4882a593Smuzhiyun 			if (dma_mapping_error(dev, page_dmaaddr)) {
2611*4882a593Smuzhiyun 				put_page(pagep);
2612*4882a593Smuzhiyun 				pagep = NULL;
2613*4882a593Smuzhiyun 				adapter->drv_stats.dma_map_errors++;
2614*4882a593Smuzhiyun 				break;
2615*4882a593Smuzhiyun 			}
2616*4882a593Smuzhiyun 			page_offset = 0;
2617*4882a593Smuzhiyun 		} else {
2618*4882a593Smuzhiyun 			get_page(pagep);
2619*4882a593Smuzhiyun 			page_offset += rx_frag_size;
2620*4882a593Smuzhiyun 		}
2621*4882a593Smuzhiyun 		page_info->page_offset = page_offset;
2622*4882a593Smuzhiyun 		page_info->page = pagep;
2623*4882a593Smuzhiyun 
2624*4882a593Smuzhiyun 		rxd = queue_head_node(rxq);
2625*4882a593Smuzhiyun 		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
2626*4882a593Smuzhiyun 		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
2627*4882a593Smuzhiyun 		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
2628*4882a593Smuzhiyun 
2629*4882a593Smuzhiyun 		/* Any space left in the current big page for another frag? */
2630*4882a593Smuzhiyun 		if ((page_offset + rx_frag_size + rx_frag_size) >
2631*4882a593Smuzhiyun 					adapter->big_page_size) {
2632*4882a593Smuzhiyun 			pagep = NULL;
2633*4882a593Smuzhiyun 			page_info->last_frag = true;
2634*4882a593Smuzhiyun 			dma_unmap_addr_set(page_info, bus, page_dmaaddr);
2635*4882a593Smuzhiyun 		} else {
2636*4882a593Smuzhiyun 			dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
2637*4882a593Smuzhiyun 		}
2638*4882a593Smuzhiyun 
2639*4882a593Smuzhiyun 		prev_page_info = page_info;
2640*4882a593Smuzhiyun 		queue_head_inc(rxq);
2641*4882a593Smuzhiyun 		page_info = &rxo->page_info_tbl[rxq->head];
2642*4882a593Smuzhiyun 	}
2643*4882a593Smuzhiyun 
2644*4882a593Smuzhiyun 	/* Mark the last frag of a page when we break out of the above loop
2645*4882a593Smuzhiyun 	 * with no more slots available in the RXQ
2646*4882a593Smuzhiyun 	 */
2647*4882a593Smuzhiyun 	if (pagep) {
2648*4882a593Smuzhiyun 		prev_page_info->last_frag = true;
2649*4882a593Smuzhiyun 		dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
2650*4882a593Smuzhiyun 	}
2651*4882a593Smuzhiyun 
2652*4882a593Smuzhiyun 	if (posted) {
2653*4882a593Smuzhiyun 		atomic_add(posted, &rxq->used);
2654*4882a593Smuzhiyun 		if (rxo->rx_post_starved)
2655*4882a593Smuzhiyun 			rxo->rx_post_starved = false;
2656*4882a593Smuzhiyun 		do {
2657*4882a593Smuzhiyun 			notify = min(MAX_NUM_POST_ERX_DB, posted);
2658*4882a593Smuzhiyun 			be_rxq_notify(adapter, rxq->id, notify);
2659*4882a593Smuzhiyun 			posted -= notify;
2660*4882a593Smuzhiyun 		} while (posted);
2661*4882a593Smuzhiyun 	} else if (atomic_read(&rxq->used) == 0) {
2662*4882a593Smuzhiyun 		/* Let be_worker replenish when memory is available */
2663*4882a593Smuzhiyun 		rxo->rx_post_starved = true;
2664*4882a593Smuzhiyun 	}
2665*4882a593Smuzhiyun }
2666*4882a593Smuzhiyun 
2667*4882a593Smuzhiyun static inline void be_update_tx_err(struct be_tx_obj *txo, u8 status)
2668*4882a593Smuzhiyun {
2669*4882a593Smuzhiyun 	switch (status) {
2670*4882a593Smuzhiyun 	case BE_TX_COMP_HDR_PARSE_ERR:
2671*4882a593Smuzhiyun 		tx_stats(txo)->tx_hdr_parse_err++;
2672*4882a593Smuzhiyun 		break;
2673*4882a593Smuzhiyun 	case BE_TX_COMP_NDMA_ERR:
2674*4882a593Smuzhiyun 		tx_stats(txo)->tx_dma_err++;
2675*4882a593Smuzhiyun 		break;
2676*4882a593Smuzhiyun 	case BE_TX_COMP_ACL_ERR:
2677*4882a593Smuzhiyun 		tx_stats(txo)->tx_spoof_check_err++;
2678*4882a593Smuzhiyun 		break;
2679*4882a593Smuzhiyun 	}
2680*4882a593Smuzhiyun }
2681*4882a593Smuzhiyun 
2682*4882a593Smuzhiyun static inline void lancer_update_tx_err(struct be_tx_obj *txo, u8 status)
2683*4882a593Smuzhiyun {
2684*4882a593Smuzhiyun 	switch (status) {
2685*4882a593Smuzhiyun 	case LANCER_TX_COMP_LSO_ERR:
2686*4882a593Smuzhiyun 		tx_stats(txo)->tx_tso_err++;
2687*4882a593Smuzhiyun 		break;
2688*4882a593Smuzhiyun 	case LANCER_TX_COMP_HSW_DROP_MAC_ERR:
2689*4882a593Smuzhiyun 	case LANCER_TX_COMP_HSW_DROP_VLAN_ERR:
2690*4882a593Smuzhiyun 		tx_stats(txo)->tx_spoof_check_err++;
2691*4882a593Smuzhiyun 		break;
2692*4882a593Smuzhiyun 	case LANCER_TX_COMP_QINQ_ERR:
2693*4882a593Smuzhiyun 		tx_stats(txo)->tx_qinq_err++;
2694*4882a593Smuzhiyun 		break;
2695*4882a593Smuzhiyun 	case LANCER_TX_COMP_PARITY_ERR:
2696*4882a593Smuzhiyun 		tx_stats(txo)->tx_internal_parity_err++;
2697*4882a593Smuzhiyun 		break;
2698*4882a593Smuzhiyun 	case LANCER_TX_COMP_DMA_ERR:
2699*4882a593Smuzhiyun 		tx_stats(txo)->tx_dma_err++;
2700*4882a593Smuzhiyun 		break;
2701*4882a593Smuzhiyun 	case LANCER_TX_COMP_SGE_ERR:
2702*4882a593Smuzhiyun 		tx_stats(txo)->tx_sge_err++;
2703*4882a593Smuzhiyun 		break;
2704*4882a593Smuzhiyun 	}
2705*4882a593Smuzhiyun }
2706*4882a593Smuzhiyun 
2707*4882a593Smuzhiyun static struct be_tx_compl_info *be_tx_compl_get(struct be_adapter *adapter,
2708*4882a593Smuzhiyun 						struct be_tx_obj *txo)
2709*4882a593Smuzhiyun {
2710*4882a593Smuzhiyun 	struct be_queue_info *tx_cq = &txo->cq;
2711*4882a593Smuzhiyun 	struct be_tx_compl_info *txcp = &txo->txcp;
2712*4882a593Smuzhiyun 	struct be_eth_tx_compl *compl = queue_tail_node(tx_cq);
2713*4882a593Smuzhiyun 
2714*4882a593Smuzhiyun 	if (compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
2715*4882a593Smuzhiyun 		return NULL;
2716*4882a593Smuzhiyun 
2717*4882a593Smuzhiyun 	/* Ensure load ordering of valid bit dword and other dwords below */
2718*4882a593Smuzhiyun 	rmb();
2719*4882a593Smuzhiyun 	be_dws_le_to_cpu(compl, sizeof(*compl));
2720*4882a593Smuzhiyun 
2721*4882a593Smuzhiyun 	txcp->status = GET_TX_COMPL_BITS(status, compl);
2722*4882a593Smuzhiyun 	txcp->end_index = GET_TX_COMPL_BITS(wrb_index, compl);
2723*4882a593Smuzhiyun 
2724*4882a593Smuzhiyun 	if (txcp->status) {
2725*4882a593Smuzhiyun 		if (lancer_chip(adapter)) {
2726*4882a593Smuzhiyun 			lancer_update_tx_err(txo, txcp->status);
2727*4882a593Smuzhiyun 			/* Reset the adapter in case of TSO,
2728*4882a593Smuzhiyun 			 * SGE or Parity error
2729*4882a593Smuzhiyun 			 */
2730*4882a593Smuzhiyun 			if (txcp->status == LANCER_TX_COMP_LSO_ERR ||
2731*4882a593Smuzhiyun 			    txcp->status == LANCER_TX_COMP_PARITY_ERR ||
2732*4882a593Smuzhiyun 			    txcp->status == LANCER_TX_COMP_SGE_ERR)
2733*4882a593Smuzhiyun 				be_set_error(adapter, BE_ERROR_TX);
2734*4882a593Smuzhiyun 		} else {
2735*4882a593Smuzhiyun 			be_update_tx_err(txo, txcp->status);
2736*4882a593Smuzhiyun 		}
2737*4882a593Smuzhiyun 	}
2738*4882a593Smuzhiyun 
2739*4882a593Smuzhiyun 	if (be_check_error(adapter, BE_ERROR_TX))
2740*4882a593Smuzhiyun 		return NULL;
2741*4882a593Smuzhiyun 
2742*4882a593Smuzhiyun 	compl->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
2743*4882a593Smuzhiyun 	queue_tail_inc(tx_cq);
2744*4882a593Smuzhiyun 	return txcp;
2745*4882a593Smuzhiyun }
2746*4882a593Smuzhiyun 
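/* Each transmitted skb occupies a header WRB followed by one WRB per data
 * fragment, and sent_skbs[] records the skb at the slot of its header WRB.
 * The loop below walks the ring up to last_index, unmapping every fragment it
 * passes and freeing the corresponding skb(s), and returns the number of WRBs
 * reclaimed so the caller can adjust txq->used.
 */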
2747*4882a593Smuzhiyun static u16 be_tx_compl_process(struct be_adapter *adapter,
2748*4882a593Smuzhiyun 			       struct be_tx_obj *txo, u16 last_index)
2749*4882a593Smuzhiyun {
2750*4882a593Smuzhiyun 	struct sk_buff **sent_skbs = txo->sent_skb_list;
2751*4882a593Smuzhiyun 	struct be_queue_info *txq = &txo->q;
2752*4882a593Smuzhiyun 	struct sk_buff *skb = NULL;
2753*4882a593Smuzhiyun 	bool unmap_skb_hdr = false;
2754*4882a593Smuzhiyun 	struct be_eth_wrb *wrb;
2755*4882a593Smuzhiyun 	u16 num_wrbs = 0;
2756*4882a593Smuzhiyun 	u32 frag_index;
2757*4882a593Smuzhiyun 
2758*4882a593Smuzhiyun 	do {
2759*4882a593Smuzhiyun 		if (sent_skbs[txq->tail]) {
2760*4882a593Smuzhiyun 			/* Free skb from prev req */
2761*4882a593Smuzhiyun 			if (skb)
2762*4882a593Smuzhiyun 				dev_consume_skb_any(skb);
2763*4882a593Smuzhiyun 			skb = sent_skbs[txq->tail];
2764*4882a593Smuzhiyun 			sent_skbs[txq->tail] = NULL;
2765*4882a593Smuzhiyun 			queue_tail_inc(txq);  /* skip hdr wrb */
2766*4882a593Smuzhiyun 			num_wrbs++;
2767*4882a593Smuzhiyun 			unmap_skb_hdr = true;
2768*4882a593Smuzhiyun 		}
2769*4882a593Smuzhiyun 		wrb = queue_tail_node(txq);
2770*4882a593Smuzhiyun 		frag_index = txq->tail;
2771*4882a593Smuzhiyun 		unmap_tx_frag(&adapter->pdev->dev, wrb,
2772*4882a593Smuzhiyun 			      (unmap_skb_hdr && skb_headlen(skb)));
2773*4882a593Smuzhiyun 		unmap_skb_hdr = false;
2774*4882a593Smuzhiyun 		queue_tail_inc(txq);
2775*4882a593Smuzhiyun 		num_wrbs++;
2776*4882a593Smuzhiyun 	} while (frag_index != last_index);
2777*4882a593Smuzhiyun 	dev_consume_skb_any(skb);
2778*4882a593Smuzhiyun 
2779*4882a593Smuzhiyun 	return num_wrbs;
2780*4882a593Smuzhiyun }
2781*4882a593Smuzhiyun 
2782*4882a593Smuzhiyun /* Return the number of events in the event queue */
2783*4882a593Smuzhiyun static inline int events_get(struct be_eq_obj *eqo)
2784*4882a593Smuzhiyun {
2785*4882a593Smuzhiyun 	struct be_eq_entry *eqe;
2786*4882a593Smuzhiyun 	int num = 0;
2787*4882a593Smuzhiyun 
2788*4882a593Smuzhiyun 	do {
2789*4882a593Smuzhiyun 		eqe = queue_tail_node(&eqo->q);
2790*4882a593Smuzhiyun 		if (eqe->evt == 0)
2791*4882a593Smuzhiyun 			break;
2792*4882a593Smuzhiyun 
2793*4882a593Smuzhiyun 		rmb();
2794*4882a593Smuzhiyun 		eqe->evt = 0;
2795*4882a593Smuzhiyun 		num++;
2796*4882a593Smuzhiyun 		queue_tail_inc(&eqo->q);
2797*4882a593Smuzhiyun 	} while (true);
2798*4882a593Smuzhiyun 
2799*4882a593Smuzhiyun 	return num;
2800*4882a593Smuzhiyun }
2801*4882a593Smuzhiyun 
2802*4882a593Smuzhiyun /* Leaves the EQ in disarmed state */
2803*4882a593Smuzhiyun static void be_eq_clean(struct be_eq_obj *eqo)
2804*4882a593Smuzhiyun {
2805*4882a593Smuzhiyun 	int num = events_get(eqo);
2806*4882a593Smuzhiyun 
2807*4882a593Smuzhiyun 	be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2808*4882a593Smuzhiyun }
2809*4882a593Smuzhiyun 
2810*4882a593Smuzhiyun /* Free posted rx buffers that were not used */
2811*4882a593Smuzhiyun static void be_rxq_clean(struct be_rx_obj *rxo)
2812*4882a593Smuzhiyun {
2813*4882a593Smuzhiyun 	struct be_queue_info *rxq = &rxo->q;
2814*4882a593Smuzhiyun 	struct be_rx_page_info *page_info;
2815*4882a593Smuzhiyun 
2816*4882a593Smuzhiyun 	while (atomic_read(&rxq->used) > 0) {
2817*4882a593Smuzhiyun 		page_info = get_rx_page_info(rxo);
2818*4882a593Smuzhiyun 		put_page(page_info->page);
2819*4882a593Smuzhiyun 		memset(page_info, 0, sizeof(*page_info));
2820*4882a593Smuzhiyun 	}
2821*4882a593Smuzhiyun 	BUG_ON(atomic_read(&rxq->used));
2822*4882a593Smuzhiyun 	rxq->tail = 0;
2823*4882a593Smuzhiyun 	rxq->head = 0;
2824*4882a593Smuzhiyun }
2825*4882a593Smuzhiyun 
2826*4882a593Smuzhiyun static void be_rx_cq_clean(struct be_rx_obj *rxo)
2827*4882a593Smuzhiyun {
2828*4882a593Smuzhiyun 	struct be_queue_info *rx_cq = &rxo->cq;
2829*4882a593Smuzhiyun 	struct be_rx_compl_info *rxcp;
2830*4882a593Smuzhiyun 	struct be_adapter *adapter = rxo->adapter;
2831*4882a593Smuzhiyun 	int flush_wait = 0;
2832*4882a593Smuzhiyun 
2833*4882a593Smuzhiyun 	/* Consume pending rx completions.
2834*4882a593Smuzhiyun 	 * Wait for the flush completion (identified by zero num_rcvd)
2835*4882a593Smuzhiyun 	 * to arrive. Notify CQ even when there are no more CQ entries
2836*4882a593Smuzhiyun 	 * for HW to flush partially coalesced CQ entries.
2837*4882a593Smuzhiyun 	 * In Lancer, there is no need to wait for flush compl.
2838*4882a593Smuzhiyun 	 */
2839*4882a593Smuzhiyun 	for (;;) {
2840*4882a593Smuzhiyun 		rxcp = be_rx_compl_get(rxo);
2841*4882a593Smuzhiyun 		if (!rxcp) {
2842*4882a593Smuzhiyun 			if (lancer_chip(adapter))
2843*4882a593Smuzhiyun 				break;
2844*4882a593Smuzhiyun 
2845*4882a593Smuzhiyun 			if (flush_wait++ > 50 ||
2846*4882a593Smuzhiyun 			    be_check_error(adapter,
2847*4882a593Smuzhiyun 					   BE_ERROR_HW)) {
2848*4882a593Smuzhiyun 				dev_warn(&adapter->pdev->dev,
2849*4882a593Smuzhiyun 					 "did not receive flush compl\n");
2850*4882a593Smuzhiyun 				break;
2851*4882a593Smuzhiyun 			}
2852*4882a593Smuzhiyun 			be_cq_notify(adapter, rx_cq->id, true, 0);
2853*4882a593Smuzhiyun 			mdelay(1);
2854*4882a593Smuzhiyun 		} else {
2855*4882a593Smuzhiyun 			be_rx_compl_discard(rxo, rxcp);
2856*4882a593Smuzhiyun 			be_cq_notify(adapter, rx_cq->id, false, 1);
2857*4882a593Smuzhiyun 			if (rxcp->num_rcvd == 0)
2858*4882a593Smuzhiyun 				break;
2859*4882a593Smuzhiyun 		}
2860*4882a593Smuzhiyun 	}
2861*4882a593Smuzhiyun 
2862*4882a593Smuzhiyun 	/* After cleanup, leave the CQ in unarmed state */
2863*4882a593Smuzhiyun 	be_cq_notify(adapter, rx_cq->id, false, 0);
2864*4882a593Smuzhiyun }
2865*4882a593Smuzhiyun 
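/* Teardown-time drain of the TX completion queues: completions are polled
 * until the hardware has been quiet for roughly 10ms (or a HW error is
 * flagged), after which any WRBs that were queued but never notified to the
 * hardware are reclaimed directly and the queue indices are rewound.
 */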
2866*4882a593Smuzhiyun static void be_tx_compl_clean(struct be_adapter *adapter)
2867*4882a593Smuzhiyun {
2868*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
2869*4882a593Smuzhiyun 	u16 cmpl = 0, timeo = 0, num_wrbs = 0;
2870*4882a593Smuzhiyun 	struct be_tx_compl_info *txcp;
2871*4882a593Smuzhiyun 	struct be_queue_info *txq;
2872*4882a593Smuzhiyun 	u32 end_idx, notified_idx;
2873*4882a593Smuzhiyun 	struct be_tx_obj *txo;
2874*4882a593Smuzhiyun 	int i, pending_txqs;
2875*4882a593Smuzhiyun 
2876*4882a593Smuzhiyun 	/* Stop polling for compls when HW has been silent for 10ms */
2877*4882a593Smuzhiyun 	do {
2878*4882a593Smuzhiyun 		pending_txqs = adapter->num_tx_qs;
2879*4882a593Smuzhiyun 
2880*4882a593Smuzhiyun 		for_all_tx_queues(adapter, txo, i) {
2881*4882a593Smuzhiyun 			cmpl = 0;
2882*4882a593Smuzhiyun 			num_wrbs = 0;
2883*4882a593Smuzhiyun 			txq = &txo->q;
2884*4882a593Smuzhiyun 			while ((txcp = be_tx_compl_get(adapter, txo))) {
2885*4882a593Smuzhiyun 				num_wrbs +=
2886*4882a593Smuzhiyun 					be_tx_compl_process(adapter, txo,
2887*4882a593Smuzhiyun 							    txcp->end_index);
2888*4882a593Smuzhiyun 				cmpl++;
2889*4882a593Smuzhiyun 			}
2890*4882a593Smuzhiyun 			if (cmpl) {
2891*4882a593Smuzhiyun 				be_cq_notify(adapter, txo->cq.id, false, cmpl);
2892*4882a593Smuzhiyun 				atomic_sub(num_wrbs, &txq->used);
2893*4882a593Smuzhiyun 				timeo = 0;
2894*4882a593Smuzhiyun 			}
2895*4882a593Smuzhiyun 			if (!be_is_tx_compl_pending(txo))
2896*4882a593Smuzhiyun 				pending_txqs--;
2897*4882a593Smuzhiyun 		}
2898*4882a593Smuzhiyun 
2899*4882a593Smuzhiyun 		if (pending_txqs == 0 || ++timeo > 10 ||
2900*4882a593Smuzhiyun 		    be_check_error(adapter, BE_ERROR_HW))
2901*4882a593Smuzhiyun 			break;
2902*4882a593Smuzhiyun 
2903*4882a593Smuzhiyun 		mdelay(1);
2904*4882a593Smuzhiyun 	} while (true);
2905*4882a593Smuzhiyun 
2906*4882a593Smuzhiyun 	/* Free enqueued TX that was never notified to HW */
2907*4882a593Smuzhiyun 	for_all_tx_queues(adapter, txo, i) {
2908*4882a593Smuzhiyun 		txq = &txo->q;
2909*4882a593Smuzhiyun 
2910*4882a593Smuzhiyun 		if (atomic_read(&txq->used)) {
2911*4882a593Smuzhiyun 			dev_info(dev, "txq%d: cleaning %d pending tx-wrbs\n",
2912*4882a593Smuzhiyun 				 i, atomic_read(&txq->used));
2913*4882a593Smuzhiyun 			notified_idx = txq->tail;
2914*4882a593Smuzhiyun 			end_idx = txq->tail;
2915*4882a593Smuzhiyun 			index_adv(&end_idx, atomic_read(&txq->used) - 1,
2916*4882a593Smuzhiyun 				  txq->len);
2917*4882a593Smuzhiyun 			/* Use the tx-compl process logic to handle requests
2918*4882a593Smuzhiyun 			 * that were not sent to the HW.
2919*4882a593Smuzhiyun 			 */
2920*4882a593Smuzhiyun 			num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2921*4882a593Smuzhiyun 			atomic_sub(num_wrbs, &txq->used);
2922*4882a593Smuzhiyun 			BUG_ON(atomic_read(&txq->used));
2923*4882a593Smuzhiyun 			txo->pend_wrb_cnt = 0;
2924*4882a593Smuzhiyun 			/* Since hw was never notified of these requests,
2925*4882a593Smuzhiyun 			 * reset TXQ indices
2926*4882a593Smuzhiyun 			 */
2927*4882a593Smuzhiyun 			txq->head = notified_idx;
2928*4882a593Smuzhiyun 			txq->tail = notified_idx;
2929*4882a593Smuzhiyun 		}
2930*4882a593Smuzhiyun 	}
2931*4882a593Smuzhiyun }
2932*4882a593Smuzhiyun 
2933*4882a593Smuzhiyun static void be_evt_queues_destroy(struct be_adapter *adapter)
2934*4882a593Smuzhiyun {
2935*4882a593Smuzhiyun 	struct be_eq_obj *eqo;
2936*4882a593Smuzhiyun 	int i;
2937*4882a593Smuzhiyun 
2938*4882a593Smuzhiyun 	for_all_evt_queues(adapter, eqo, i) {
2939*4882a593Smuzhiyun 		if (eqo->q.created) {
2940*4882a593Smuzhiyun 			be_eq_clean(eqo);
2941*4882a593Smuzhiyun 			be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2942*4882a593Smuzhiyun 			netif_napi_del(&eqo->napi);
2943*4882a593Smuzhiyun 			free_cpumask_var(eqo->affinity_mask);
2944*4882a593Smuzhiyun 		}
2945*4882a593Smuzhiyun 		be_queue_free(adapter, &eqo->q);
2946*4882a593Smuzhiyun 	}
2947*4882a593Smuzhiyun }
2948*4882a593Smuzhiyun 
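/* One EQ (with its own NAPI context and CPU affinity hint) is created per
 * usable interrupt vector, capped by the larger of the configured RX and TX
 * IRQ counts. For example, with 8 vectors, cfg_num_rx_irqs = 6 and
 * cfg_num_tx_irqs = 4, six event queues would be created. (Example values are
 * illustrative only.)
 */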
2949*4882a593Smuzhiyun static int be_evt_queues_create(struct be_adapter *adapter)
2950*4882a593Smuzhiyun {
2951*4882a593Smuzhiyun 	struct be_queue_info *eq;
2952*4882a593Smuzhiyun 	struct be_eq_obj *eqo;
2953*4882a593Smuzhiyun 	struct be_aic_obj *aic;
2954*4882a593Smuzhiyun 	int i, rc;
2955*4882a593Smuzhiyun 
2956*4882a593Smuzhiyun 	/* need enough EQs to service both RX and TX queues */
2957*4882a593Smuzhiyun 	adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2958*4882a593Smuzhiyun 				    max(adapter->cfg_num_rx_irqs,
2959*4882a593Smuzhiyun 					adapter->cfg_num_tx_irqs));
2960*4882a593Smuzhiyun 
2961*4882a593Smuzhiyun 	adapter->aic_enabled = true;
2962*4882a593Smuzhiyun 
2963*4882a593Smuzhiyun 	for_all_evt_queues(adapter, eqo, i) {
2964*4882a593Smuzhiyun 		int numa_node = dev_to_node(&adapter->pdev->dev);
2965*4882a593Smuzhiyun 
2966*4882a593Smuzhiyun 		aic = &adapter->aic_obj[i];
2967*4882a593Smuzhiyun 		eqo->adapter = adapter;
2968*4882a593Smuzhiyun 		eqo->idx = i;
2969*4882a593Smuzhiyun 		aic->max_eqd = BE_MAX_EQD;
2970*4882a593Smuzhiyun 
2971*4882a593Smuzhiyun 		eq = &eqo->q;
2972*4882a593Smuzhiyun 		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2973*4882a593Smuzhiyun 				    sizeof(struct be_eq_entry));
2974*4882a593Smuzhiyun 		if (rc)
2975*4882a593Smuzhiyun 			return rc;
2976*4882a593Smuzhiyun 
2977*4882a593Smuzhiyun 		rc = be_cmd_eq_create(adapter, eqo);
2978*4882a593Smuzhiyun 		if (rc)
2979*4882a593Smuzhiyun 			return rc;
2980*4882a593Smuzhiyun 
2981*4882a593Smuzhiyun 		if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2982*4882a593Smuzhiyun 			return -ENOMEM;
2983*4882a593Smuzhiyun 		cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2984*4882a593Smuzhiyun 				eqo->affinity_mask);
2985*4882a593Smuzhiyun 		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2986*4882a593Smuzhiyun 			       BE_NAPI_WEIGHT);
2987*4882a593Smuzhiyun 	}
2988*4882a593Smuzhiyun 	return 0;
2989*4882a593Smuzhiyun }
2990*4882a593Smuzhiyun 
2991*4882a593Smuzhiyun static void be_mcc_queues_destroy(struct be_adapter *adapter)
2992*4882a593Smuzhiyun {
2993*4882a593Smuzhiyun 	struct be_queue_info *q;
2994*4882a593Smuzhiyun 
2995*4882a593Smuzhiyun 	q = &adapter->mcc_obj.q;
2996*4882a593Smuzhiyun 	if (q->created)
2997*4882a593Smuzhiyun 		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2998*4882a593Smuzhiyun 	be_queue_free(adapter, q);
2999*4882a593Smuzhiyun 
3000*4882a593Smuzhiyun 	q = &adapter->mcc_obj.cq;
3001*4882a593Smuzhiyun 	if (q->created)
3002*4882a593Smuzhiyun 		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3003*4882a593Smuzhiyun 	be_queue_free(adapter, q);
3004*4882a593Smuzhiyun }
3005*4882a593Smuzhiyun 
3006*4882a593Smuzhiyun /* Must be called only after TX qs are created as MCC shares TX EQ */
3007*4882a593Smuzhiyun static int be_mcc_queues_create(struct be_adapter *adapter)
3008*4882a593Smuzhiyun {
3009*4882a593Smuzhiyun 	struct be_queue_info *q, *cq;
3010*4882a593Smuzhiyun 
3011*4882a593Smuzhiyun 	cq = &adapter->mcc_obj.cq;
3012*4882a593Smuzhiyun 	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
3013*4882a593Smuzhiyun 			   sizeof(struct be_mcc_compl)))
3014*4882a593Smuzhiyun 		goto err;
3015*4882a593Smuzhiyun 
3016*4882a593Smuzhiyun 	/* Use the default EQ for MCC completions */
3017*4882a593Smuzhiyun 	if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
3018*4882a593Smuzhiyun 		goto mcc_cq_free;
3019*4882a593Smuzhiyun 
3020*4882a593Smuzhiyun 	q = &adapter->mcc_obj.q;
3021*4882a593Smuzhiyun 	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3022*4882a593Smuzhiyun 		goto mcc_cq_destroy;
3023*4882a593Smuzhiyun 
3024*4882a593Smuzhiyun 	if (be_cmd_mccq_create(adapter, q, cq))
3025*4882a593Smuzhiyun 		goto mcc_q_free;
3026*4882a593Smuzhiyun 
3027*4882a593Smuzhiyun 	return 0;
3028*4882a593Smuzhiyun 
3029*4882a593Smuzhiyun mcc_q_free:
3030*4882a593Smuzhiyun 	be_queue_free(adapter, q);
3031*4882a593Smuzhiyun mcc_cq_destroy:
3032*4882a593Smuzhiyun 	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
3033*4882a593Smuzhiyun mcc_cq_free:
3034*4882a593Smuzhiyun 	be_queue_free(adapter, cq);
3035*4882a593Smuzhiyun err:
3036*4882a593Smuzhiyun 	return -1;
3037*4882a593Smuzhiyun }
3038*4882a593Smuzhiyun 
3039*4882a593Smuzhiyun static void be_tx_queues_destroy(struct be_adapter *adapter)
3040*4882a593Smuzhiyun {
3041*4882a593Smuzhiyun 	struct be_queue_info *q;
3042*4882a593Smuzhiyun 	struct be_tx_obj *txo;
3043*4882a593Smuzhiyun 	u8 i;
3044*4882a593Smuzhiyun 
3045*4882a593Smuzhiyun 	for_all_tx_queues(adapter, txo, i) {
3046*4882a593Smuzhiyun 		q = &txo->q;
3047*4882a593Smuzhiyun 		if (q->created)
3048*4882a593Smuzhiyun 			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
3049*4882a593Smuzhiyun 		be_queue_free(adapter, q);
3050*4882a593Smuzhiyun 
3051*4882a593Smuzhiyun 		q = &txo->cq;
3052*4882a593Smuzhiyun 		if (q->created)
3053*4882a593Smuzhiyun 			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3054*4882a593Smuzhiyun 		be_queue_free(adapter, q);
3055*4882a593Smuzhiyun 	}
3056*4882a593Smuzhiyun }
3057*4882a593Smuzhiyun 
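/* Creates one TX queue per configured TX IRQ, bounded by the number of EQs.
 * Each TXQ gets its own completion queue, bound to an EQ by simple modulo
 * indexing, and the netdev XPS map for the queue is pointed at that EQ's CPU
 * affinity mask so transmitting CPUs and completion processing line up.
 */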
3058*4882a593Smuzhiyun static int be_tx_qs_create(struct be_adapter *adapter)
3059*4882a593Smuzhiyun {
3060*4882a593Smuzhiyun 	struct be_queue_info *cq;
3061*4882a593Smuzhiyun 	struct be_tx_obj *txo;
3062*4882a593Smuzhiyun 	struct be_eq_obj *eqo;
3063*4882a593Smuzhiyun 	int status, i;
3064*4882a593Smuzhiyun 
3065*4882a593Smuzhiyun 	adapter->num_tx_qs = min(adapter->num_evt_qs, adapter->cfg_num_tx_irqs);
3066*4882a593Smuzhiyun 
3067*4882a593Smuzhiyun 	for_all_tx_queues(adapter, txo, i) {
3068*4882a593Smuzhiyun 		cq = &txo->cq;
3069*4882a593Smuzhiyun 		status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
3070*4882a593Smuzhiyun 					sizeof(struct be_eth_tx_compl));
3071*4882a593Smuzhiyun 		if (status)
3072*4882a593Smuzhiyun 			return status;
3073*4882a593Smuzhiyun 
3074*4882a593Smuzhiyun 		u64_stats_init(&txo->stats.sync);
3075*4882a593Smuzhiyun 		u64_stats_init(&txo->stats.sync_compl);
3076*4882a593Smuzhiyun 
3077*4882a593Smuzhiyun 		/* If num_evt_qs is less than num_tx_qs, then more than
3078*4882a593Smuzhiyun 		 * one txq shares an eq
3079*4882a593Smuzhiyun 		 */
3080*4882a593Smuzhiyun 		eqo = &adapter->eq_obj[i % adapter->num_evt_qs];
3081*4882a593Smuzhiyun 		status = be_cmd_cq_create(adapter, cq, &eqo->q, false, 3);
3082*4882a593Smuzhiyun 		if (status)
3083*4882a593Smuzhiyun 			return status;
3084*4882a593Smuzhiyun 
3085*4882a593Smuzhiyun 		status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
3086*4882a593Smuzhiyun 					sizeof(struct be_eth_wrb));
3087*4882a593Smuzhiyun 		if (status)
3088*4882a593Smuzhiyun 			return status;
3089*4882a593Smuzhiyun 
3090*4882a593Smuzhiyun 		status = be_cmd_txq_create(adapter, txo);
3091*4882a593Smuzhiyun 		if (status)
3092*4882a593Smuzhiyun 			return status;
3093*4882a593Smuzhiyun 
3094*4882a593Smuzhiyun 		netif_set_xps_queue(adapter->netdev, eqo->affinity_mask,
3095*4882a593Smuzhiyun 				    eqo->idx);
3096*4882a593Smuzhiyun 	}
3097*4882a593Smuzhiyun 
3098*4882a593Smuzhiyun 	dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
3099*4882a593Smuzhiyun 		 adapter->num_tx_qs);
3100*4882a593Smuzhiyun 	return 0;
3101*4882a593Smuzhiyun }
3102*4882a593Smuzhiyun 
3103*4882a593Smuzhiyun static void be_rx_cqs_destroy(struct be_adapter *adapter)
3104*4882a593Smuzhiyun {
3105*4882a593Smuzhiyun 	struct be_queue_info *q;
3106*4882a593Smuzhiyun 	struct be_rx_obj *rxo;
3107*4882a593Smuzhiyun 	int i;
3108*4882a593Smuzhiyun 
3109*4882a593Smuzhiyun 	for_all_rx_queues(adapter, rxo, i) {
3110*4882a593Smuzhiyun 		q = &rxo->cq;
3111*4882a593Smuzhiyun 		if (q->created)
3112*4882a593Smuzhiyun 			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
3113*4882a593Smuzhiyun 		be_queue_free(adapter, q);
3114*4882a593Smuzhiyun 	}
3115*4882a593Smuzhiyun }
3116*4882a593Smuzhiyun 
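/* RSS rings are used only when at least two can be created (the minimum of
 * the EQ count and the configured RX IRQs); otherwise the adapter falls back
 * to a single non-RSS RXQ. A default RXQ is added on top of the RSS rings
 * when the function needs one (need_def_rxq).
 */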
3117*4882a593Smuzhiyun static int be_rx_cqs_create(struct be_adapter *adapter)
3118*4882a593Smuzhiyun {
3119*4882a593Smuzhiyun 	struct be_queue_info *eq, *cq;
3120*4882a593Smuzhiyun 	struct be_rx_obj *rxo;
3121*4882a593Smuzhiyun 	int rc, i;
3122*4882a593Smuzhiyun 
3123*4882a593Smuzhiyun 	adapter->num_rss_qs =
3124*4882a593Smuzhiyun 			min(adapter->num_evt_qs, adapter->cfg_num_rx_irqs);
3125*4882a593Smuzhiyun 
3126*4882a593Smuzhiyun 	/* We'll use RSS only if at least 2 RSS rings are supported. */
3127*4882a593Smuzhiyun 	if (adapter->num_rss_qs < 2)
3128*4882a593Smuzhiyun 		adapter->num_rss_qs = 0;
3129*4882a593Smuzhiyun 
3130*4882a593Smuzhiyun 	adapter->num_rx_qs = adapter->num_rss_qs + adapter->need_def_rxq;
3131*4882a593Smuzhiyun 
3132*4882a593Smuzhiyun 	/* When the interface is not capable of RSS rings (and there is no
3133*4882a593Smuzhiyun 	 * need to create a default RXQ) we'll still need one RXQ
3134*4882a593Smuzhiyun 	 */
3135*4882a593Smuzhiyun 	if (adapter->num_rx_qs == 0)
3136*4882a593Smuzhiyun 		adapter->num_rx_qs = 1;
3137*4882a593Smuzhiyun 
3138*4882a593Smuzhiyun 	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
3139*4882a593Smuzhiyun 	for_all_rx_queues(adapter, rxo, i) {
3140*4882a593Smuzhiyun 		rxo->adapter = adapter;
3141*4882a593Smuzhiyun 		cq = &rxo->cq;
3142*4882a593Smuzhiyun 		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
3143*4882a593Smuzhiyun 				    sizeof(struct be_eth_rx_compl));
3144*4882a593Smuzhiyun 		if (rc)
3145*4882a593Smuzhiyun 			return rc;
3146*4882a593Smuzhiyun 
3147*4882a593Smuzhiyun 		u64_stats_init(&rxo->stats.sync);
3148*4882a593Smuzhiyun 		eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
3149*4882a593Smuzhiyun 		rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
3150*4882a593Smuzhiyun 		if (rc)
3151*4882a593Smuzhiyun 			return rc;
3152*4882a593Smuzhiyun 	}
3153*4882a593Smuzhiyun 
3154*4882a593Smuzhiyun 	dev_info(&adapter->pdev->dev,
3155*4882a593Smuzhiyun 		 "created %d RX queue(s)\n", adapter->num_rx_qs);
3156*4882a593Smuzhiyun 	return 0;
3157*4882a593Smuzhiyun }
3158*4882a593Smuzhiyun 
3159*4882a593Smuzhiyun static irqreturn_t be_intx(int irq, void *dev)
3160*4882a593Smuzhiyun {
3161*4882a593Smuzhiyun 	struct be_eq_obj *eqo = dev;
3162*4882a593Smuzhiyun 	struct be_adapter *adapter = eqo->adapter;
3163*4882a593Smuzhiyun 	int num_evts = 0;
3164*4882a593Smuzhiyun 
3165*4882a593Smuzhiyun 	/* IRQ is not expected when NAPI is scheduled as the EQ
3166*4882a593Smuzhiyun 	 * will not be armed.
3167*4882a593Smuzhiyun 	 * But, this can happen on Lancer INTx where it takes
3168*4882a593Smuzhiyun 	 * a while to de-assert INTx or in BE2 where occasionally
3169*4882a593Smuzhiyun 	 * an interrupt may be raised even when EQ is unarmed.
3170*4882a593Smuzhiyun 	 * If NAPI is already scheduled, then counting & notifying
3171*4882a593Smuzhiyun 	 * events will orphan them.
3172*4882a593Smuzhiyun 	 */
3173*4882a593Smuzhiyun 	if (napi_schedule_prep(&eqo->napi)) {
3174*4882a593Smuzhiyun 		num_evts = events_get(eqo);
3175*4882a593Smuzhiyun 		__napi_schedule(&eqo->napi);
3176*4882a593Smuzhiyun 		if (num_evts)
3177*4882a593Smuzhiyun 			eqo->spurious_intr = 0;
3178*4882a593Smuzhiyun 	}
3179*4882a593Smuzhiyun 	be_eq_notify(adapter, eqo->q.id, false, true, num_evts, 0);
3180*4882a593Smuzhiyun 
3181*4882a593Smuzhiyun 	/* Return IRQ_HANDLED only for the first spurious intr
3182*4882a593Smuzhiyun 	 * after a valid intr to stop the kernel from branding
3183*4882a593Smuzhiyun 	 * this irq as a bad one!
3184*4882a593Smuzhiyun 	 */
3185*4882a593Smuzhiyun 	if (num_evts || eqo->spurious_intr++ == 0)
3186*4882a593Smuzhiyun 		return IRQ_HANDLED;
3187*4882a593Smuzhiyun 	else
3188*4882a593Smuzhiyun 		return IRQ_NONE;
3189*4882a593Smuzhiyun }
3190*4882a593Smuzhiyun 
3191*4882a593Smuzhiyun static irqreturn_t be_msix(int irq, void *dev)
3192*4882a593Smuzhiyun {
3193*4882a593Smuzhiyun 	struct be_eq_obj *eqo = dev;
3194*4882a593Smuzhiyun 
3195*4882a593Smuzhiyun 	be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
3196*4882a593Smuzhiyun 	napi_schedule(&eqo->napi);
3197*4882a593Smuzhiyun 	return IRQ_HANDLED;
3198*4882a593Smuzhiyun }
3199*4882a593Smuzhiyun 
3200*4882a593Smuzhiyun static inline bool do_gro(struct be_rx_compl_info *rxcp)
3201*4882a593Smuzhiyun {
3202*4882a593Smuzhiyun 	return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
3203*4882a593Smuzhiyun }
3204*4882a593Smuzhiyun 
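/* NAPI RX loop: consumes up to 'budget' completions, skipping flush
 * completions (num_rcvd == 0), discarding zero-length completions seen with
 * partial DMA on Lancer B0 and, on BE, packets let through by imperfect
 * promiscuous filtering. Consumed fragments are replenished here unless the
 * queue is in the post_starved state, in which case be_worker refills it.
 */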
3205*4882a593Smuzhiyun static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
3206*4882a593Smuzhiyun 			 int budget)
3207*4882a593Smuzhiyun {
3208*4882a593Smuzhiyun 	struct be_adapter *adapter = rxo->adapter;
3209*4882a593Smuzhiyun 	struct be_queue_info *rx_cq = &rxo->cq;
3210*4882a593Smuzhiyun 	struct be_rx_compl_info *rxcp;
3211*4882a593Smuzhiyun 	u32 work_done;
3212*4882a593Smuzhiyun 	u32 frags_consumed = 0;
3213*4882a593Smuzhiyun 
3214*4882a593Smuzhiyun 	for (work_done = 0; work_done < budget; work_done++) {
3215*4882a593Smuzhiyun 		rxcp = be_rx_compl_get(rxo);
3216*4882a593Smuzhiyun 		if (!rxcp)
3217*4882a593Smuzhiyun 			break;
3218*4882a593Smuzhiyun 
3219*4882a593Smuzhiyun 		/* Is it a flush compl that has no data */
3220*4882a593Smuzhiyun 		if (unlikely(rxcp->num_rcvd == 0))
3221*4882a593Smuzhiyun 			goto loop_continue;
3222*4882a593Smuzhiyun 
3223*4882a593Smuzhiyun 		/* Discard compl with partial DMA Lancer B0 */
3224*4882a593Smuzhiyun 		if (unlikely(!rxcp->pkt_size)) {
3225*4882a593Smuzhiyun 			be_rx_compl_discard(rxo, rxcp);
3226*4882a593Smuzhiyun 			goto loop_continue;
3227*4882a593Smuzhiyun 		}
3228*4882a593Smuzhiyun 
3229*4882a593Smuzhiyun 		/* On BE drop pkts that arrive due to imperfect filtering in
3230*4882a593Smuzhiyun 		 * promiscuous mode on some SKUs
3231*4882a593Smuzhiyun 		 */
3232*4882a593Smuzhiyun 		if (unlikely(rxcp->port != adapter->port_num &&
3233*4882a593Smuzhiyun 			     !lancer_chip(adapter))) {
3234*4882a593Smuzhiyun 			be_rx_compl_discard(rxo, rxcp);
3235*4882a593Smuzhiyun 			goto loop_continue;
3236*4882a593Smuzhiyun 		}
3237*4882a593Smuzhiyun 
3238*4882a593Smuzhiyun 		if (do_gro(rxcp))
3239*4882a593Smuzhiyun 			be_rx_compl_process_gro(rxo, napi, rxcp);
3240*4882a593Smuzhiyun 		else
3241*4882a593Smuzhiyun 			be_rx_compl_process(rxo, napi, rxcp);
3242*4882a593Smuzhiyun 
3243*4882a593Smuzhiyun loop_continue:
3244*4882a593Smuzhiyun 		frags_consumed += rxcp->num_rcvd;
3245*4882a593Smuzhiyun 		be_rx_stats_update(rxo, rxcp);
3246*4882a593Smuzhiyun 	}
3247*4882a593Smuzhiyun 
3248*4882a593Smuzhiyun 	if (work_done) {
3249*4882a593Smuzhiyun 		be_cq_notify(adapter, rx_cq->id, true, work_done);
3250*4882a593Smuzhiyun 
3251*4882a593Smuzhiyun 		/* When an rx-obj gets into post_starved state, just
3252*4882a593Smuzhiyun 		 * let be_worker do the posting.
3253*4882a593Smuzhiyun 		 */
3254*4882a593Smuzhiyun 		if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
3255*4882a593Smuzhiyun 		    !rxo->rx_post_starved)
3256*4882a593Smuzhiyun 			be_post_rx_frags(rxo, GFP_ATOMIC,
3257*4882a593Smuzhiyun 					 max_t(u32, MAX_RX_POST,
3258*4882a593Smuzhiyun 					       frags_consumed));
3259*4882a593Smuzhiyun 	}
3260*4882a593Smuzhiyun 
3261*4882a593Smuzhiyun 	return work_done;
3262*4882a593Smuzhiyun }
3263*4882a593Smuzhiyun 
3264*4882a593Smuzhiyun 
3265*4882a593Smuzhiyun static void be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
3266*4882a593Smuzhiyun 			  int idx)
3267*4882a593Smuzhiyun {
3268*4882a593Smuzhiyun 	int num_wrbs = 0, work_done = 0;
3269*4882a593Smuzhiyun 	struct be_tx_compl_info *txcp;
3270*4882a593Smuzhiyun 
3271*4882a593Smuzhiyun 	while ((txcp = be_tx_compl_get(adapter, txo))) {
3272*4882a593Smuzhiyun 		num_wrbs += be_tx_compl_process(adapter, txo, txcp->end_index);
3273*4882a593Smuzhiyun 		work_done++;
3274*4882a593Smuzhiyun 	}
3275*4882a593Smuzhiyun 
3276*4882a593Smuzhiyun 	if (work_done) {
3277*4882a593Smuzhiyun 		be_cq_notify(adapter, txo->cq.id, true, work_done);
3278*4882a593Smuzhiyun 		atomic_sub(num_wrbs, &txo->q.used);
3279*4882a593Smuzhiyun 
3280*4882a593Smuzhiyun 		/* As Tx wrbs have been freed up, wake up netdev queue
3281*4882a593Smuzhiyun 		 * if it was stopped due to lack of tx wrbs.  */
3282*4882a593Smuzhiyun 		if (__netif_subqueue_stopped(adapter->netdev, idx) &&
3283*4882a593Smuzhiyun 		    be_can_txq_wake(txo)) {
3284*4882a593Smuzhiyun 			netif_wake_subqueue(adapter->netdev, idx);
3285*4882a593Smuzhiyun 		}
3286*4882a593Smuzhiyun 
3287*4882a593Smuzhiyun 		u64_stats_update_begin(&tx_stats(txo)->sync_compl);
3288*4882a593Smuzhiyun 		tx_stats(txo)->tx_compl += work_done;
3289*4882a593Smuzhiyun 		u64_stats_update_end(&tx_stats(txo)->sync_compl);
3290*4882a593Smuzhiyun 	}
3291*4882a593Smuzhiyun }
3292*4882a593Smuzhiyun 
3293*4882a593Smuzhiyun int be_poll(struct napi_struct *napi, int budget)
3294*4882a593Smuzhiyun {
3295*4882a593Smuzhiyun 	struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
3296*4882a593Smuzhiyun 	struct be_adapter *adapter = eqo->adapter;
3297*4882a593Smuzhiyun 	int max_work = 0, work, i, num_evts;
3298*4882a593Smuzhiyun 	struct be_rx_obj *rxo;
3299*4882a593Smuzhiyun 	struct be_tx_obj *txo;
3300*4882a593Smuzhiyun 	u32 mult_enc = 0;
3301*4882a593Smuzhiyun 
3302*4882a593Smuzhiyun 	num_evts = events_get(eqo);
3303*4882a593Smuzhiyun 
3304*4882a593Smuzhiyun 	for_all_tx_queues_on_eq(adapter, eqo, txo, i)
3305*4882a593Smuzhiyun 		be_process_tx(adapter, txo, i);
3306*4882a593Smuzhiyun 
3307*4882a593Smuzhiyun 	/* This loop will iterate twice for EQ0 in which
3308*4882a593Smuzhiyun 	 * completions of the last RXQ (default one) are also processed.
3309*4882a593Smuzhiyun 	 * For other EQs the loop iterates only once
3310*4882a593Smuzhiyun 	 */
3311*4882a593Smuzhiyun 	for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
3312*4882a593Smuzhiyun 		work = be_process_rx(rxo, napi, budget);
3313*4882a593Smuzhiyun 		max_work = max(work, max_work);
3314*4882a593Smuzhiyun 	}
3315*4882a593Smuzhiyun 
3316*4882a593Smuzhiyun 	if (is_mcc_eqo(eqo))
3317*4882a593Smuzhiyun 		be_process_mcc(adapter);
3318*4882a593Smuzhiyun 
3319*4882a593Smuzhiyun 	if (max_work < budget) {
3320*4882a593Smuzhiyun 		napi_complete_done(napi, max_work);
3321*4882a593Smuzhiyun 
3322*4882a593Smuzhiyun 		/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
3323*4882a593Smuzhiyun 		 * delay via a delay multiplier encoding value
3324*4882a593Smuzhiyun 		 */
3325*4882a593Smuzhiyun 		if (skyhawk_chip(adapter))
3326*4882a593Smuzhiyun 			mult_enc = be_get_eq_delay_mult_enc(eqo);
3327*4882a593Smuzhiyun 
3328*4882a593Smuzhiyun 		be_eq_notify(adapter, eqo->q.id, true, false, num_evts,
3329*4882a593Smuzhiyun 			     mult_enc);
3330*4882a593Smuzhiyun 	} else {
3331*4882a593Smuzhiyun 		/* As we'll continue in polling mode, count and clear events */
3332*4882a593Smuzhiyun 		be_eq_notify(adapter, eqo->q.id, false, false, num_evts, 0);
3333*4882a593Smuzhiyun 	}
3334*4882a593Smuzhiyun 	return max_work;
3335*4882a593Smuzhiyun }
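
/* Editor's note (illustrative, not part of the driver): with a NAPI budget
 * of 64, if the RX loops above consume only 10 completions, be_poll()
 * calls napi_complete_done(napi, 10) and re-arms the EQ (on Skyhawk the
 * doorbell also carries the delay-multiplier encoding), so the next event
 * raises an interrupt. If the full budget of 64 is consumed, events are
 * only acknowledged and the NAPI core keeps calling be_poll().
 */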
3336*4882a593Smuzhiyun 
3337*4882a593Smuzhiyun void be_detect_error(struct be_adapter *adapter)
3338*4882a593Smuzhiyun {
3339*4882a593Smuzhiyun 	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
3340*4882a593Smuzhiyun 	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
3341*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
3342*4882a593Smuzhiyun 	u16 val;
3343*4882a593Smuzhiyun 	u32 i;
3344*4882a593Smuzhiyun 
3345*4882a593Smuzhiyun 	if (be_check_error(adapter, BE_ERROR_HW))
3346*4882a593Smuzhiyun 		return;
3347*4882a593Smuzhiyun 
3348*4882a593Smuzhiyun 	if (lancer_chip(adapter)) {
3349*4882a593Smuzhiyun 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3350*4882a593Smuzhiyun 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3351*4882a593Smuzhiyun 			be_set_error(adapter, BE_ERROR_UE);
3352*4882a593Smuzhiyun 			sliport_err1 = ioread32(adapter->db +
3353*4882a593Smuzhiyun 						SLIPORT_ERROR1_OFFSET);
3354*4882a593Smuzhiyun 			sliport_err2 = ioread32(adapter->db +
3355*4882a593Smuzhiyun 						SLIPORT_ERROR2_OFFSET);
3356*4882a593Smuzhiyun 			/* Do not log error messages if it's a FW reset */
3357*4882a593Smuzhiyun 			if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
3358*4882a593Smuzhiyun 			    sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
3359*4882a593Smuzhiyun 				dev_info(dev, "Reset is in progress\n");
3360*4882a593Smuzhiyun 			} else {
3361*4882a593Smuzhiyun 				dev_err(dev, "Error detected in the card\n");
3362*4882a593Smuzhiyun 				dev_err(dev, "ERR: sliport status 0x%x\n",
3363*4882a593Smuzhiyun 					sliport_status);
3364*4882a593Smuzhiyun 				dev_err(dev, "ERR: sliport error1 0x%x\n",
3365*4882a593Smuzhiyun 					sliport_err1);
3366*4882a593Smuzhiyun 				dev_err(dev, "ERR: sliport error2 0x%x\n",
3367*4882a593Smuzhiyun 					sliport_err2);
3368*4882a593Smuzhiyun 			}
3369*4882a593Smuzhiyun 		}
3370*4882a593Smuzhiyun 	} else {
3371*4882a593Smuzhiyun 		ue_lo = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_LOW);
3372*4882a593Smuzhiyun 		ue_hi = ioread32(adapter->pcicfg + PCICFG_UE_STATUS_HIGH);
3373*4882a593Smuzhiyun 		ue_lo_mask = ioread32(adapter->pcicfg +
3374*4882a593Smuzhiyun 				      PCICFG_UE_STATUS_LOW_MASK);
3375*4882a593Smuzhiyun 		ue_hi_mask = ioread32(adapter->pcicfg +
3376*4882a593Smuzhiyun 				      PCICFG_UE_STATUS_HI_MASK);
3377*4882a593Smuzhiyun 
3378*4882a593Smuzhiyun 		ue_lo = (ue_lo & ~ue_lo_mask);
3379*4882a593Smuzhiyun 		ue_hi = (ue_hi & ~ue_hi_mask);
3380*4882a593Smuzhiyun 
3381*4882a593Smuzhiyun 		if (ue_lo || ue_hi) {
3382*4882a593Smuzhiyun 			/* On certain platforms BE3 hardware can indicate
3383*4882a593Smuzhiyun 			 * spurious UEs. In case of a UE in the chip,
3384*4882a593Smuzhiyun 			 * the POST register correctly reports either a
3385*4882a593Smuzhiyun 			 * FAT_LOG_START state (FW is currently dumping
3386*4882a593Smuzhiyun 			 * FAT log data) or an ARMFW_UE state. Check for the
3387*4882a593Smuzhiyun 			 * above states to ascertain if the UE is valid or not.
3388*4882a593Smuzhiyun 			 */
3389*4882a593Smuzhiyun 			if (BE3_chip(adapter)) {
3390*4882a593Smuzhiyun 				val = be_POST_stage_get(adapter);
3391*4882a593Smuzhiyun 				if ((val & POST_STAGE_FAT_LOG_START)
3392*4882a593Smuzhiyun 				     != POST_STAGE_FAT_LOG_START &&
3393*4882a593Smuzhiyun 				    (val & POST_STAGE_ARMFW_UE)
3394*4882a593Smuzhiyun 				     != POST_STAGE_ARMFW_UE &&
3395*4882a593Smuzhiyun 				    (val & POST_STAGE_RECOVERABLE_ERR)
3396*4882a593Smuzhiyun 				     != POST_STAGE_RECOVERABLE_ERR)
3397*4882a593Smuzhiyun 					return;
3398*4882a593Smuzhiyun 			}
3399*4882a593Smuzhiyun 
3400*4882a593Smuzhiyun 			dev_err(dev, "Error detected in the adapter");
3401*4882a593Smuzhiyun 			be_set_error(adapter, BE_ERROR_UE);
3402*4882a593Smuzhiyun 
3403*4882a593Smuzhiyun 			for (i = 0; ue_lo; ue_lo >>= 1, i++) {
3404*4882a593Smuzhiyun 				if (ue_lo & 1)
3405*4882a593Smuzhiyun 					dev_err(dev, "UE: %s bit set\n",
3406*4882a593Smuzhiyun 						ue_status_low_desc[i]);
3407*4882a593Smuzhiyun 			}
3408*4882a593Smuzhiyun 			for (i = 0; ue_hi; ue_hi >>= 1, i++) {
3409*4882a593Smuzhiyun 				if (ue_hi & 1)
3410*4882a593Smuzhiyun 					dev_err(dev, "UE: %s bit set\n",
3411*4882a593Smuzhiyun 						ue_status_hi_desc[i]);
3412*4882a593Smuzhiyun 			}
3413*4882a593Smuzhiyun 		}
3414*4882a593Smuzhiyun 	}
3415*4882a593Smuzhiyun }
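
/* Illustrative sketch (editor's addition, hypothetical register values):
 * if PCICFG_UE_STATUS_LOW reads 0x00000005 while its mask register reads
 * 0x00000004, the masked bit 2 is ignored and only bit 0 survives
 * (ue_lo = 0x5 & ~0x4 = 0x1), so the loop above prints a single
 * "UE: <ue_status_low_desc[0]> bit set" message and the adapter is
 * flagged with BE_ERROR_UE.
 */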
3416*4882a593Smuzhiyun 
3417*4882a593Smuzhiyun static void be_msix_disable(struct be_adapter *adapter)
3418*4882a593Smuzhiyun {
3419*4882a593Smuzhiyun 	if (msix_enabled(adapter)) {
3420*4882a593Smuzhiyun 		pci_disable_msix(adapter->pdev);
3421*4882a593Smuzhiyun 		adapter->num_msix_vec = 0;
3422*4882a593Smuzhiyun 		adapter->num_msix_roce_vec = 0;
3423*4882a593Smuzhiyun 	}
3424*4882a593Smuzhiyun }
3425*4882a593Smuzhiyun 
3426*4882a593Smuzhiyun static int be_msix_enable(struct be_adapter *adapter)
3427*4882a593Smuzhiyun {
3428*4882a593Smuzhiyun 	unsigned int i, max_roce_eqs;
3429*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
3430*4882a593Smuzhiyun 	int num_vec;
3431*4882a593Smuzhiyun 
3432*4882a593Smuzhiyun 	/* If RoCE is supported, program the max number of vectors that
3433*4882a593Smuzhiyun 	 * could be used for NIC and RoCE, else, just program the number
3434*4882a593Smuzhiyun 	 * we'll use initially.
3435*4882a593Smuzhiyun 	 */
3436*4882a593Smuzhiyun 	if (be_roce_supported(adapter)) {
3437*4882a593Smuzhiyun 		max_roce_eqs =
3438*4882a593Smuzhiyun 			be_max_func_eqs(adapter) - be_max_nic_eqs(adapter);
3439*4882a593Smuzhiyun 		max_roce_eqs = min(max_roce_eqs, num_online_cpus());
3440*4882a593Smuzhiyun 		num_vec = be_max_any_irqs(adapter) + max_roce_eqs;
3441*4882a593Smuzhiyun 	} else {
3442*4882a593Smuzhiyun 		num_vec = max(adapter->cfg_num_rx_irqs,
3443*4882a593Smuzhiyun 			      adapter->cfg_num_tx_irqs);
3444*4882a593Smuzhiyun 	}
3445*4882a593Smuzhiyun 
3446*4882a593Smuzhiyun 	for (i = 0; i < num_vec; i++)
3447*4882a593Smuzhiyun 		adapter->msix_entries[i].entry = i;
3448*4882a593Smuzhiyun 
3449*4882a593Smuzhiyun 	num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
3450*4882a593Smuzhiyun 					MIN_MSIX_VECTORS, num_vec);
3451*4882a593Smuzhiyun 	if (num_vec < 0)
3452*4882a593Smuzhiyun 		goto fail;
3453*4882a593Smuzhiyun 
3454*4882a593Smuzhiyun 	if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
3455*4882a593Smuzhiyun 		adapter->num_msix_roce_vec = num_vec / 2;
3456*4882a593Smuzhiyun 		dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
3457*4882a593Smuzhiyun 			 adapter->num_msix_roce_vec);
3458*4882a593Smuzhiyun 	}
3459*4882a593Smuzhiyun 
3460*4882a593Smuzhiyun 	adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
3461*4882a593Smuzhiyun 
3462*4882a593Smuzhiyun 	dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
3463*4882a593Smuzhiyun 		 adapter->num_msix_vec);
3464*4882a593Smuzhiyun 	return 0;
3465*4882a593Smuzhiyun 
3466*4882a593Smuzhiyun fail:
3467*4882a593Smuzhiyun 	dev_warn(dev, "MSIx enable failed\n");
3468*4882a593Smuzhiyun 
3469*4882a593Smuzhiyun 	/* INTx is not supported in VFs, so fail probe if enable_msix fails */
3470*4882a593Smuzhiyun 	if (be_virtfn(adapter))
3471*4882a593Smuzhiyun 		return num_vec;
3472*4882a593Smuzhiyun 	return 0;
3473*4882a593Smuzhiyun }
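
/* Worked example (editor's addition, hypothetical numbers): on a RoCE
 * capable function with be_max_func_eqs() = 32, be_max_nic_eqs() = 16,
 * be_max_any_irqs() = 16 and 8 online CPUs, the driver requests
 * 16 + min(32 - 16, 8) = 24 vectors. If pci_enable_msix_range() grants
 * only 20 of them, half (10) are set aside for RoCE and the NIC keeps
 * the remaining 10.
 */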
3474*4882a593Smuzhiyun 
3475*4882a593Smuzhiyun static inline int be_msix_vec_get(struct be_adapter *adapter,
3476*4882a593Smuzhiyun 				  struct be_eq_obj *eqo)
3477*4882a593Smuzhiyun {
3478*4882a593Smuzhiyun 	return adapter->msix_entries[eqo->msix_idx].vector;
3479*4882a593Smuzhiyun }
3480*4882a593Smuzhiyun 
3481*4882a593Smuzhiyun static int be_msix_register(struct be_adapter *adapter)
3482*4882a593Smuzhiyun {
3483*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
3484*4882a593Smuzhiyun 	struct be_eq_obj *eqo;
3485*4882a593Smuzhiyun 	int status, i, vec;
3486*4882a593Smuzhiyun 
3487*4882a593Smuzhiyun 	for_all_evt_queues(adapter, eqo, i) {
3488*4882a593Smuzhiyun 		sprintf(eqo->desc, "%s-q%d", netdev->name, i);
3489*4882a593Smuzhiyun 		vec = be_msix_vec_get(adapter, eqo);
3490*4882a593Smuzhiyun 		status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
3491*4882a593Smuzhiyun 		if (status)
3492*4882a593Smuzhiyun 			goto err_msix;
3493*4882a593Smuzhiyun 
3494*4882a593Smuzhiyun 		irq_set_affinity_hint(vec, eqo->affinity_mask);
3495*4882a593Smuzhiyun 	}
3496*4882a593Smuzhiyun 
3497*4882a593Smuzhiyun 	return 0;
3498*4882a593Smuzhiyun err_msix:
3499*4882a593Smuzhiyun 	for (i--; i >= 0; i--) {
3500*4882a593Smuzhiyun 		eqo = &adapter->eq_obj[i];
3501*4882a593Smuzhiyun 		free_irq(be_msix_vec_get(adapter, eqo), eqo);
3502*4882a593Smuzhiyun 	}
3503*4882a593Smuzhiyun 	dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
3504*4882a593Smuzhiyun 		 status);
3505*4882a593Smuzhiyun 	be_msix_disable(adapter);
3506*4882a593Smuzhiyun 	return status;
3507*4882a593Smuzhiyun }
3508*4882a593Smuzhiyun 
3509*4882a593Smuzhiyun static int be_irq_register(struct be_adapter *adapter)
3510*4882a593Smuzhiyun {
3511*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
3512*4882a593Smuzhiyun 	int status;
3513*4882a593Smuzhiyun 
3514*4882a593Smuzhiyun 	if (msix_enabled(adapter)) {
3515*4882a593Smuzhiyun 		status = be_msix_register(adapter);
3516*4882a593Smuzhiyun 		if (status == 0)
3517*4882a593Smuzhiyun 			goto done;
3518*4882a593Smuzhiyun 		/* INTx is not supported for VF */
3519*4882a593Smuzhiyun 		if (be_virtfn(adapter))
3520*4882a593Smuzhiyun 			return status;
3521*4882a593Smuzhiyun 	}
3522*4882a593Smuzhiyun 
3523*4882a593Smuzhiyun 	/* INTx: only the first EQ is used */
3524*4882a593Smuzhiyun 	netdev->irq = adapter->pdev->irq;
3525*4882a593Smuzhiyun 	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
3526*4882a593Smuzhiyun 			     &adapter->eq_obj[0]);
3527*4882a593Smuzhiyun 	if (status) {
3528*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev,
3529*4882a593Smuzhiyun 			"INTx request IRQ failed - err %d\n", status);
3530*4882a593Smuzhiyun 		return status;
3531*4882a593Smuzhiyun 	}
3532*4882a593Smuzhiyun done:
3533*4882a593Smuzhiyun 	adapter->isr_registered = true;
3534*4882a593Smuzhiyun 	return 0;
3535*4882a593Smuzhiyun }
3536*4882a593Smuzhiyun 
3537*4882a593Smuzhiyun static void be_irq_unregister(struct be_adapter *adapter)
3538*4882a593Smuzhiyun {
3539*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
3540*4882a593Smuzhiyun 	struct be_eq_obj *eqo;
3541*4882a593Smuzhiyun 	int i, vec;
3542*4882a593Smuzhiyun 
3543*4882a593Smuzhiyun 	if (!adapter->isr_registered)
3544*4882a593Smuzhiyun 		return;
3545*4882a593Smuzhiyun 
3546*4882a593Smuzhiyun 	/* INTx */
3547*4882a593Smuzhiyun 	if (!msix_enabled(adapter)) {
3548*4882a593Smuzhiyun 		free_irq(netdev->irq, &adapter->eq_obj[0]);
3549*4882a593Smuzhiyun 		goto done;
3550*4882a593Smuzhiyun 	}
3551*4882a593Smuzhiyun 
3552*4882a593Smuzhiyun 	/* MSIx */
3553*4882a593Smuzhiyun 	for_all_evt_queues(adapter, eqo, i) {
3554*4882a593Smuzhiyun 		vec = be_msix_vec_get(adapter, eqo);
3555*4882a593Smuzhiyun 		irq_set_affinity_hint(vec, NULL);
3556*4882a593Smuzhiyun 		free_irq(vec, eqo);
3557*4882a593Smuzhiyun 	}
3558*4882a593Smuzhiyun 
3559*4882a593Smuzhiyun done:
3560*4882a593Smuzhiyun 	adapter->isr_registered = false;
3561*4882a593Smuzhiyun }
3562*4882a593Smuzhiyun 
3563*4882a593Smuzhiyun static void be_rx_qs_destroy(struct be_adapter *adapter)
3564*4882a593Smuzhiyun {
3565*4882a593Smuzhiyun 	struct rss_info *rss = &adapter->rss_info;
3566*4882a593Smuzhiyun 	struct be_queue_info *q;
3567*4882a593Smuzhiyun 	struct be_rx_obj *rxo;
3568*4882a593Smuzhiyun 	int i;
3569*4882a593Smuzhiyun 
3570*4882a593Smuzhiyun 	for_all_rx_queues(adapter, rxo, i) {
3571*4882a593Smuzhiyun 		q = &rxo->q;
3572*4882a593Smuzhiyun 		if (q->created) {
3573*4882a593Smuzhiyun 			/* If RXQs are destroyed while in an "out of buffer"
3574*4882a593Smuzhiyun 			 * state, there is a possibility of an HW stall on
3575*4882a593Smuzhiyun 			 * Lancer. So, post 64 buffers to each queue to relieve
3576*4882a593Smuzhiyun 			 * the "out of buffer" condition.
3577*4882a593Smuzhiyun 			 * Make sure there's space in the RXQ before posting.
3578*4882a593Smuzhiyun 			 */
3579*4882a593Smuzhiyun 			if (lancer_chip(adapter)) {
3580*4882a593Smuzhiyun 				be_rx_cq_clean(rxo);
3581*4882a593Smuzhiyun 				if (atomic_read(&q->used) == 0)
3582*4882a593Smuzhiyun 					be_post_rx_frags(rxo, GFP_KERNEL,
3583*4882a593Smuzhiyun 							 MAX_RX_POST);
3584*4882a593Smuzhiyun 			}
3585*4882a593Smuzhiyun 
3586*4882a593Smuzhiyun 			be_cmd_rxq_destroy(adapter, q);
3587*4882a593Smuzhiyun 			be_rx_cq_clean(rxo);
3588*4882a593Smuzhiyun 			be_rxq_clean(rxo);
3589*4882a593Smuzhiyun 		}
3590*4882a593Smuzhiyun 		be_queue_free(adapter, q);
3591*4882a593Smuzhiyun 	}
3592*4882a593Smuzhiyun 
3593*4882a593Smuzhiyun 	if (rss->rss_flags) {
3594*4882a593Smuzhiyun 		rss->rss_flags = RSS_ENABLE_NONE;
3595*4882a593Smuzhiyun 		be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3596*4882a593Smuzhiyun 				  128, rss->rss_hkey);
3597*4882a593Smuzhiyun 	}
3598*4882a593Smuzhiyun }
3599*4882a593Smuzhiyun 
3600*4882a593Smuzhiyun static void be_disable_if_filters(struct be_adapter *adapter)
3601*4882a593Smuzhiyun {
3602*4882a593Smuzhiyun 	/* Don't delete MAC on BE3 VFs without FILTMGMT privilege  */
3603*4882a593Smuzhiyun 	if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
3604*4882a593Smuzhiyun 	    check_privilege(adapter, BE_PRIV_FILTMGMT)) {
3605*4882a593Smuzhiyun 		be_dev_mac_del(adapter, adapter->pmac_id[0]);
3606*4882a593Smuzhiyun 		eth_zero_addr(adapter->dev_mac);
3607*4882a593Smuzhiyun 	}
3608*4882a593Smuzhiyun 
3609*4882a593Smuzhiyun 	be_clear_uc_list(adapter);
3610*4882a593Smuzhiyun 	be_clear_mc_list(adapter);
3611*4882a593Smuzhiyun 
3612*4882a593Smuzhiyun 	/* The IFACE flags are enabled in the open path and cleared
3613*4882a593Smuzhiyun 	 * in the close path. When a VF gets detached from the host and
3614*4882a593Smuzhiyun 	 * assigned to a VM the following happens:
3615*4882a593Smuzhiyun 	 *	- VF's IFACE flags get cleared in the detach path
3616*4882a593Smuzhiyun 	 *	- IFACE create is issued by the VF in the attach path
3617*4882a593Smuzhiyun 	 * Due to a bug in the BE3/Skyhawk-R FW
3618*4882a593Smuzhiyun 	 * (Lancer FW doesn't have the bug), the IFACE capability flags
3619*4882a593Smuzhiyun 	 * specified along with the IFACE create cmd issued by a VF are not
3620*4882a593Smuzhiyun 	 * honoured by FW.  As a consequence, if a *new* driver
3621*4882a593Smuzhiyun 	 * (that enables/disables IFACE flags in open/close)
3622*4882a593Smuzhiyun 	 * is loaded in the host and an *old* driver is used by a VM/VF,
3623*4882a593Smuzhiyun 	 * the IFACE gets created *without* the needed flags.
3624*4882a593Smuzhiyun 	 * To avoid this, disable RX-filter flags only for Lancer.
3625*4882a593Smuzhiyun 	 */
3626*4882a593Smuzhiyun 	if (lancer_chip(adapter)) {
3627*4882a593Smuzhiyun 		be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3628*4882a593Smuzhiyun 		adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3629*4882a593Smuzhiyun 	}
3630*4882a593Smuzhiyun }
3631*4882a593Smuzhiyun 
3632*4882a593Smuzhiyun static int be_close(struct net_device *netdev)
3633*4882a593Smuzhiyun {
3634*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
3635*4882a593Smuzhiyun 	struct be_eq_obj *eqo;
3636*4882a593Smuzhiyun 	int i;
3637*4882a593Smuzhiyun 
3638*4882a593Smuzhiyun 	/* This protection is needed as be_close() may be called even when the
3639*4882a593Smuzhiyun 	 * adapter is in cleared state (after eeh perm failure)
3640*4882a593Smuzhiyun 	 */
3641*4882a593Smuzhiyun 	if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3642*4882a593Smuzhiyun 		return 0;
3643*4882a593Smuzhiyun 
3644*4882a593Smuzhiyun 	/* Before attempting cleanup ensure all the pending cmds in the
3645*4882a593Smuzhiyun 	 * config_wq have finished execution
3646*4882a593Smuzhiyun 	 */
3647*4882a593Smuzhiyun 	flush_workqueue(be_wq);
3648*4882a593Smuzhiyun 
3649*4882a593Smuzhiyun 	be_disable_if_filters(adapter);
3650*4882a593Smuzhiyun 
3651*4882a593Smuzhiyun 	if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
3652*4882a593Smuzhiyun 		for_all_evt_queues(adapter, eqo, i) {
3653*4882a593Smuzhiyun 			napi_disable(&eqo->napi);
3654*4882a593Smuzhiyun 		}
3655*4882a593Smuzhiyun 		adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
3656*4882a593Smuzhiyun 	}
3657*4882a593Smuzhiyun 
3658*4882a593Smuzhiyun 	be_async_mcc_disable(adapter);
3659*4882a593Smuzhiyun 
3660*4882a593Smuzhiyun 	/* Wait for all pending tx completions to arrive so that
3661*4882a593Smuzhiyun 	 * all tx skbs are freed.
3662*4882a593Smuzhiyun 	 */
3663*4882a593Smuzhiyun 	netif_tx_disable(netdev);
3664*4882a593Smuzhiyun 	be_tx_compl_clean(adapter);
3665*4882a593Smuzhiyun 
3666*4882a593Smuzhiyun 	be_rx_qs_destroy(adapter);
3667*4882a593Smuzhiyun 
3668*4882a593Smuzhiyun 	for_all_evt_queues(adapter, eqo, i) {
3669*4882a593Smuzhiyun 		if (msix_enabled(adapter))
3670*4882a593Smuzhiyun 			synchronize_irq(be_msix_vec_get(adapter, eqo));
3671*4882a593Smuzhiyun 		else
3672*4882a593Smuzhiyun 			synchronize_irq(netdev->irq);
3673*4882a593Smuzhiyun 		be_eq_clean(eqo);
3674*4882a593Smuzhiyun 	}
3675*4882a593Smuzhiyun 
3676*4882a593Smuzhiyun 	be_irq_unregister(adapter);
3677*4882a593Smuzhiyun 
3678*4882a593Smuzhiyun 	return 0;
3679*4882a593Smuzhiyun }
3680*4882a593Smuzhiyun 
3681*4882a593Smuzhiyun static int be_rx_qs_create(struct be_adapter *adapter)
3682*4882a593Smuzhiyun {
3683*4882a593Smuzhiyun 	struct rss_info *rss = &adapter->rss_info;
3684*4882a593Smuzhiyun 	u8 rss_key[RSS_HASH_KEY_LEN];
3685*4882a593Smuzhiyun 	struct be_rx_obj *rxo;
3686*4882a593Smuzhiyun 	int rc, i, j;
3687*4882a593Smuzhiyun 
3688*4882a593Smuzhiyun 	for_all_rx_queues(adapter, rxo, i) {
3689*4882a593Smuzhiyun 		rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
3690*4882a593Smuzhiyun 				    sizeof(struct be_eth_rx_d));
3691*4882a593Smuzhiyun 		if (rc)
3692*4882a593Smuzhiyun 			return rc;
3693*4882a593Smuzhiyun 	}
3694*4882a593Smuzhiyun 
3695*4882a593Smuzhiyun 	if (adapter->need_def_rxq || !adapter->num_rss_qs) {
3696*4882a593Smuzhiyun 		rxo = default_rxo(adapter);
3697*4882a593Smuzhiyun 		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3698*4882a593Smuzhiyun 				       rx_frag_size, adapter->if_handle,
3699*4882a593Smuzhiyun 				       false, &rxo->rss_id);
3700*4882a593Smuzhiyun 		if (rc)
3701*4882a593Smuzhiyun 			return rc;
3702*4882a593Smuzhiyun 	}
3703*4882a593Smuzhiyun 
3704*4882a593Smuzhiyun 	for_all_rss_queues(adapter, rxo, i) {
3705*4882a593Smuzhiyun 		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
3706*4882a593Smuzhiyun 				       rx_frag_size, adapter->if_handle,
3707*4882a593Smuzhiyun 				       true, &rxo->rss_id);
3708*4882a593Smuzhiyun 		if (rc)
3709*4882a593Smuzhiyun 			return rc;
3710*4882a593Smuzhiyun 	}
3711*4882a593Smuzhiyun 
3712*4882a593Smuzhiyun 	if (be_multi_rxq(adapter)) {
3713*4882a593Smuzhiyun 		for (j = 0; j < RSS_INDIR_TABLE_LEN; j += adapter->num_rss_qs) {
3714*4882a593Smuzhiyun 			for_all_rss_queues(adapter, rxo, i) {
3715*4882a593Smuzhiyun 				if ((j + i) >= RSS_INDIR_TABLE_LEN)
3716*4882a593Smuzhiyun 					break;
3717*4882a593Smuzhiyun 				rss->rsstable[j + i] = rxo->rss_id;
3718*4882a593Smuzhiyun 				rss->rss_queue[j + i] = i;
3719*4882a593Smuzhiyun 			}
3720*4882a593Smuzhiyun 		}
3721*4882a593Smuzhiyun 		rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
3722*4882a593Smuzhiyun 			RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
3723*4882a593Smuzhiyun 
3724*4882a593Smuzhiyun 		if (!BEx_chip(adapter))
3725*4882a593Smuzhiyun 			rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
3726*4882a593Smuzhiyun 				RSS_ENABLE_UDP_IPV6;
3727*4882a593Smuzhiyun 
3728*4882a593Smuzhiyun 		netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
3729*4882a593Smuzhiyun 		rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
3730*4882a593Smuzhiyun 				       RSS_INDIR_TABLE_LEN, rss_key);
3731*4882a593Smuzhiyun 		if (rc) {
3732*4882a593Smuzhiyun 			rss->rss_flags = RSS_ENABLE_NONE;
3733*4882a593Smuzhiyun 			return rc;
3734*4882a593Smuzhiyun 		}
3735*4882a593Smuzhiyun 
3736*4882a593Smuzhiyun 		memcpy(rss->rss_hkey, rss_key, RSS_HASH_KEY_LEN);
3737*4882a593Smuzhiyun 	} else {
3738*4882a593Smuzhiyun 		/* Disable RSS, if only default RX Q is created */
3739*4882a593Smuzhiyun 		rss->rss_flags = RSS_ENABLE_NONE;
3740*4882a593Smuzhiyun 	}
3741*4882a593Smuzhiyun 
3742*4882a593Smuzhiyun 
3743*4882a593Smuzhiyun 	/* Post 1 less than RXQ-len to avoid head being equal to tail,
3744*4882a593Smuzhiyun 	 * which is a queue empty condition
3745*4882a593Smuzhiyun 	 */
3746*4882a593Smuzhiyun 	for_all_rx_queues(adapter, rxo, i)
3747*4882a593Smuzhiyun 		be_post_rx_frags(rxo, GFP_KERNEL, RX_Q_LEN - 1);
3748*4882a593Smuzhiyun 
3749*4882a593Smuzhiyun 	return 0;
3750*4882a593Smuzhiyun }
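
/* Illustrative sketch (editor's addition, hypothetical ids): with four RSS
 * rings whose hardware rss_ids are {7, 9, 11, 13}, the nested loops above
 * fill the RSS_INDIR_TABLE_LEN-entry redirection table round-robin:
 *	rsstable[]  = 7, 9, 11, 13, 7, 9, 11, 13, ...
 *	rss_queue[] = 0, 1,  2,  3, 0, 1,  2,  3, ...
 * so a packet whose RSS hash selects entry n lands on RX ring
 * (n % num_rss_qs).
 */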
3751*4882a593Smuzhiyun 
3752*4882a593Smuzhiyun static int be_enable_if_filters(struct be_adapter *adapter)
3753*4882a593Smuzhiyun {
3754*4882a593Smuzhiyun 	int status;
3755*4882a593Smuzhiyun 
3756*4882a593Smuzhiyun 	status = be_cmd_rx_filter(adapter, BE_IF_FILT_FLAGS_BASIC, ON);
3757*4882a593Smuzhiyun 	if (status)
3758*4882a593Smuzhiyun 		return status;
3759*4882a593Smuzhiyun 
3760*4882a593Smuzhiyun 	/* Normally this condition is true, as ->dev_mac is zeroed.
3761*4882a593Smuzhiyun 	 * But on BE3 VFs the initial MAC is pre-programmed by PF and
3762*4882a593Smuzhiyun 	 * subsequent be_dev_mac_add() can fail (after fresh boot)
3763*4882a593Smuzhiyun 	 */
3764*4882a593Smuzhiyun 	if (!ether_addr_equal(adapter->dev_mac, adapter->netdev->dev_addr)) {
3765*4882a593Smuzhiyun 		int old_pmac_id = -1;
3766*4882a593Smuzhiyun 
3767*4882a593Smuzhiyun 		/* Remember old programmed MAC if any - can happen on BE3 VF */
3768*4882a593Smuzhiyun 		if (!is_zero_ether_addr(adapter->dev_mac))
3769*4882a593Smuzhiyun 			old_pmac_id = adapter->pmac_id[0];
3770*4882a593Smuzhiyun 
3771*4882a593Smuzhiyun 		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
3772*4882a593Smuzhiyun 		if (status)
3773*4882a593Smuzhiyun 			return status;
3774*4882a593Smuzhiyun 
3775*4882a593Smuzhiyun 		/* Delete the old programmed MAC as we successfully programmed
3776*4882a593Smuzhiyun 		 * a new MAC
3777*4882a593Smuzhiyun 		 */
3778*4882a593Smuzhiyun 		if (old_pmac_id >= 0 && old_pmac_id != adapter->pmac_id[0])
3779*4882a593Smuzhiyun 			be_dev_mac_del(adapter, old_pmac_id);
3780*4882a593Smuzhiyun 
3781*4882a593Smuzhiyun 		ether_addr_copy(adapter->dev_mac, adapter->netdev->dev_addr);
3782*4882a593Smuzhiyun 	}
3783*4882a593Smuzhiyun 
3784*4882a593Smuzhiyun 	if (adapter->vlans_added)
3785*4882a593Smuzhiyun 		be_vid_config(adapter);
3786*4882a593Smuzhiyun 
3787*4882a593Smuzhiyun 	__be_set_rx_mode(adapter);
3788*4882a593Smuzhiyun 
3789*4882a593Smuzhiyun 	return 0;
3790*4882a593Smuzhiyun }
3791*4882a593Smuzhiyun 
3792*4882a593Smuzhiyun static int be_open(struct net_device *netdev)
3793*4882a593Smuzhiyun {
3794*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
3795*4882a593Smuzhiyun 	struct be_eq_obj *eqo;
3796*4882a593Smuzhiyun 	struct be_rx_obj *rxo;
3797*4882a593Smuzhiyun 	struct be_tx_obj *txo;
3798*4882a593Smuzhiyun 	u8 link_status;
3799*4882a593Smuzhiyun 	int status, i;
3800*4882a593Smuzhiyun 
3801*4882a593Smuzhiyun 	status = be_rx_qs_create(adapter);
3802*4882a593Smuzhiyun 	if (status)
3803*4882a593Smuzhiyun 		goto err;
3804*4882a593Smuzhiyun 
3805*4882a593Smuzhiyun 	status = be_enable_if_filters(adapter);
3806*4882a593Smuzhiyun 	if (status)
3807*4882a593Smuzhiyun 		goto err;
3808*4882a593Smuzhiyun 
3809*4882a593Smuzhiyun 	status = be_irq_register(adapter);
3810*4882a593Smuzhiyun 	if (status)
3811*4882a593Smuzhiyun 		goto err;
3812*4882a593Smuzhiyun 
3813*4882a593Smuzhiyun 	for_all_rx_queues(adapter, rxo, i)
3814*4882a593Smuzhiyun 		be_cq_notify(adapter, rxo->cq.id, true, 0);
3815*4882a593Smuzhiyun 
3816*4882a593Smuzhiyun 	for_all_tx_queues(adapter, txo, i)
3817*4882a593Smuzhiyun 		be_cq_notify(adapter, txo->cq.id, true, 0);
3818*4882a593Smuzhiyun 
3819*4882a593Smuzhiyun 	be_async_mcc_enable(adapter);
3820*4882a593Smuzhiyun 
3821*4882a593Smuzhiyun 	for_all_evt_queues(adapter, eqo, i) {
3822*4882a593Smuzhiyun 		napi_enable(&eqo->napi);
3823*4882a593Smuzhiyun 		be_eq_notify(adapter, eqo->q.id, true, true, 0, 0);
3824*4882a593Smuzhiyun 	}
3825*4882a593Smuzhiyun 	adapter->flags |= BE_FLAGS_NAPI_ENABLED;
3826*4882a593Smuzhiyun 
3827*4882a593Smuzhiyun 	status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
3828*4882a593Smuzhiyun 	if (!status)
3829*4882a593Smuzhiyun 		be_link_status_update(adapter, link_status);
3830*4882a593Smuzhiyun 
3831*4882a593Smuzhiyun 	netif_tx_start_all_queues(netdev);
3832*4882a593Smuzhiyun 
3833*4882a593Smuzhiyun 	udp_tunnel_nic_reset_ntf(netdev);
3834*4882a593Smuzhiyun 
3835*4882a593Smuzhiyun 	return 0;
3836*4882a593Smuzhiyun err:
3837*4882a593Smuzhiyun 	be_close(adapter->netdev);
3838*4882a593Smuzhiyun 	return -EIO;
3839*4882a593Smuzhiyun }
3840*4882a593Smuzhiyun 
3841*4882a593Smuzhiyun static void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
3842*4882a593Smuzhiyun {
3843*4882a593Smuzhiyun 	u32 addr;
3844*4882a593Smuzhiyun 
3845*4882a593Smuzhiyun 	addr = jhash(adapter->netdev->dev_addr, ETH_ALEN, 0);
3846*4882a593Smuzhiyun 
3847*4882a593Smuzhiyun 	mac[5] = (u8)(addr & 0xFF);
3848*4882a593Smuzhiyun 	mac[4] = (u8)((addr >> 8) & 0xFF);
3849*4882a593Smuzhiyun 	mac[3] = (u8)((addr >> 16) & 0xFF);
3850*4882a593Smuzhiyun 	/* Use the OUI from the current MAC address */
3851*4882a593Smuzhiyun 	memcpy(mac, adapter->netdev->dev_addr, 3);
3852*4882a593Smuzhiyun }
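
/* Worked example (editor's addition, hypothetical values): if the PF MAC
 * is 00:90:fa:12:34:56 and jhash() returns 0x00a1b2c3, the seed MAC
 * becomes 00:90:fa:a1:b2:c3 - the OUI (first three bytes) is kept and the
 * NIC-specific part is taken from the low 24 bits of the hash.
 * be_vf_eth_addr_config() below then assigns 00:90:fa:a1:b2:c3 to VF0,
 * 00:90:fa:a1:b2:c4 to VF1, and so on.
 */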
3853*4882a593Smuzhiyun 
3854*4882a593Smuzhiyun /*
3855*4882a593Smuzhiyun  * Generate a seed MAC address from the PF MAC Address using jhash.
3856*4882a593Smuzhiyun  * MAC addresses for VFs are assigned incrementally starting from the seed.
3857*4882a593Smuzhiyun  * These addresses are programmed in the ASIC by the PF and the VF driver
3858*4882a593Smuzhiyun  * queries for the MAC address during its probe.
3859*4882a593Smuzhiyun  */
3860*4882a593Smuzhiyun static int be_vf_eth_addr_config(struct be_adapter *adapter)
3861*4882a593Smuzhiyun {
3862*4882a593Smuzhiyun 	u32 vf;
3863*4882a593Smuzhiyun 	int status = 0;
3864*4882a593Smuzhiyun 	u8 mac[ETH_ALEN];
3865*4882a593Smuzhiyun 	struct be_vf_cfg *vf_cfg;
3866*4882a593Smuzhiyun 
3867*4882a593Smuzhiyun 	be_vf_eth_addr_generate(adapter, mac);
3868*4882a593Smuzhiyun 
3869*4882a593Smuzhiyun 	for_all_vfs(adapter, vf_cfg, vf) {
3870*4882a593Smuzhiyun 		if (BEx_chip(adapter))
3871*4882a593Smuzhiyun 			status = be_cmd_pmac_add(adapter, mac,
3872*4882a593Smuzhiyun 						 vf_cfg->if_handle,
3873*4882a593Smuzhiyun 						 &vf_cfg->pmac_id, vf + 1);
3874*4882a593Smuzhiyun 		else
3875*4882a593Smuzhiyun 			status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3876*4882a593Smuzhiyun 						vf + 1);
3877*4882a593Smuzhiyun 
3878*4882a593Smuzhiyun 		if (status)
3879*4882a593Smuzhiyun 			dev_err(&adapter->pdev->dev,
3880*4882a593Smuzhiyun 				"Mac address assignment failed for VF %d\n",
3881*4882a593Smuzhiyun 				vf);
3882*4882a593Smuzhiyun 		else
3883*4882a593Smuzhiyun 			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3884*4882a593Smuzhiyun 
3885*4882a593Smuzhiyun 		mac[5] += 1;
3886*4882a593Smuzhiyun 	}
3887*4882a593Smuzhiyun 	return status;
3888*4882a593Smuzhiyun }
3889*4882a593Smuzhiyun 
3890*4882a593Smuzhiyun static int be_vfs_mac_query(struct be_adapter *adapter)
3891*4882a593Smuzhiyun {
3892*4882a593Smuzhiyun 	int status, vf;
3893*4882a593Smuzhiyun 	u8 mac[ETH_ALEN];
3894*4882a593Smuzhiyun 	struct be_vf_cfg *vf_cfg;
3895*4882a593Smuzhiyun 
3896*4882a593Smuzhiyun 	for_all_vfs(adapter, vf_cfg, vf) {
3897*4882a593Smuzhiyun 		status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3898*4882a593Smuzhiyun 					       mac, vf_cfg->if_handle,
3899*4882a593Smuzhiyun 					       false, vf+1);
3900*4882a593Smuzhiyun 		if (status)
3901*4882a593Smuzhiyun 			return status;
3902*4882a593Smuzhiyun 		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3903*4882a593Smuzhiyun 	}
3904*4882a593Smuzhiyun 	return 0;
3905*4882a593Smuzhiyun }
3906*4882a593Smuzhiyun 
3907*4882a593Smuzhiyun static void be_vf_clear(struct be_adapter *adapter)
3908*4882a593Smuzhiyun {
3909*4882a593Smuzhiyun 	struct be_vf_cfg *vf_cfg;
3910*4882a593Smuzhiyun 	u32 vf;
3911*4882a593Smuzhiyun 
3912*4882a593Smuzhiyun 	if (pci_vfs_assigned(adapter->pdev)) {
3913*4882a593Smuzhiyun 		dev_warn(&adapter->pdev->dev,
3914*4882a593Smuzhiyun 			 "VFs are assigned to VMs: not disabling VFs\n");
3915*4882a593Smuzhiyun 		goto done;
3916*4882a593Smuzhiyun 	}
3917*4882a593Smuzhiyun 
3918*4882a593Smuzhiyun 	pci_disable_sriov(adapter->pdev);
3919*4882a593Smuzhiyun 
3920*4882a593Smuzhiyun 	for_all_vfs(adapter, vf_cfg, vf) {
3921*4882a593Smuzhiyun 		if (BEx_chip(adapter))
3922*4882a593Smuzhiyun 			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3923*4882a593Smuzhiyun 					vf_cfg->pmac_id, vf + 1);
3924*4882a593Smuzhiyun 		else
3925*4882a593Smuzhiyun 			be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3926*4882a593Smuzhiyun 				       vf + 1);
3927*4882a593Smuzhiyun 
3928*4882a593Smuzhiyun 		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3929*4882a593Smuzhiyun 	}
3930*4882a593Smuzhiyun 
3931*4882a593Smuzhiyun 	if (BE3_chip(adapter))
3932*4882a593Smuzhiyun 		be_cmd_set_hsw_config(adapter, 0, 0,
3933*4882a593Smuzhiyun 				      adapter->if_handle,
3934*4882a593Smuzhiyun 				      PORT_FWD_TYPE_PASSTHRU, 0);
3935*4882a593Smuzhiyun done:
3936*4882a593Smuzhiyun 	kfree(adapter->vf_cfg);
3937*4882a593Smuzhiyun 	adapter->num_vfs = 0;
3938*4882a593Smuzhiyun 	adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3939*4882a593Smuzhiyun }
3940*4882a593Smuzhiyun 
3941*4882a593Smuzhiyun static void be_clear_queues(struct be_adapter *adapter)
3942*4882a593Smuzhiyun {
3943*4882a593Smuzhiyun 	be_mcc_queues_destroy(adapter);
3944*4882a593Smuzhiyun 	be_rx_cqs_destroy(adapter);
3945*4882a593Smuzhiyun 	be_tx_queues_destroy(adapter);
3946*4882a593Smuzhiyun 	be_evt_queues_destroy(adapter);
3947*4882a593Smuzhiyun }
3948*4882a593Smuzhiyun 
3949*4882a593Smuzhiyun static void be_cancel_worker(struct be_adapter *adapter)
3950*4882a593Smuzhiyun {
3951*4882a593Smuzhiyun 	if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3952*4882a593Smuzhiyun 		cancel_delayed_work_sync(&adapter->work);
3953*4882a593Smuzhiyun 		adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3954*4882a593Smuzhiyun 	}
3955*4882a593Smuzhiyun }
3956*4882a593Smuzhiyun 
3957*4882a593Smuzhiyun static void be_cancel_err_detection(struct be_adapter *adapter)
3958*4882a593Smuzhiyun {
3959*4882a593Smuzhiyun 	struct be_error_recovery *err_rec = &adapter->error_recovery;
3960*4882a593Smuzhiyun 
3961*4882a593Smuzhiyun 	if (!be_err_recovery_workq)
3962*4882a593Smuzhiyun 		return;
3963*4882a593Smuzhiyun 
3964*4882a593Smuzhiyun 	if (adapter->flags & BE_FLAGS_ERR_DETECTION_SCHEDULED) {
3965*4882a593Smuzhiyun 		cancel_delayed_work_sync(&err_rec->err_detection_work);
3966*4882a593Smuzhiyun 		adapter->flags &= ~BE_FLAGS_ERR_DETECTION_SCHEDULED;
3967*4882a593Smuzhiyun 	}
3968*4882a593Smuzhiyun }
3969*4882a593Smuzhiyun 
3970*4882a593Smuzhiyun /* VxLAN offload Notes:
3971*4882a593Smuzhiyun  *
3972*4882a593Smuzhiyun  * The stack defines tunnel offload flags (hw_enc_features) for IP and doesn't
3973*4882a593Smuzhiyun  * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
3974*4882a593Smuzhiyun  * is expected to work across all types of IP tunnels once exported. Skyhawk
3975*4882a593Smuzhiyun  * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
3976*4882a593Smuzhiyun  * offloads in hw_enc_features only when a VxLAN port is added. If other (non
3977*4882a593Smuzhiyun  * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
3978*4882a593Smuzhiyun  * those other tunnels are unexported on the fly through ndo_features_check().
3979*4882a593Smuzhiyun  */
3980*4882a593Smuzhiyun static int be_vxlan_set_port(struct net_device *netdev, unsigned int table,
3981*4882a593Smuzhiyun 			     unsigned int entry, struct udp_tunnel_info *ti)
3982*4882a593Smuzhiyun {
3983*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
3984*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
3985*4882a593Smuzhiyun 	int status;
3986*4882a593Smuzhiyun 
3987*4882a593Smuzhiyun 	status = be_cmd_manage_iface(adapter, adapter->if_handle,
3988*4882a593Smuzhiyun 				     OP_CONVERT_NORMAL_TO_TUNNEL);
3989*4882a593Smuzhiyun 	if (status) {
3990*4882a593Smuzhiyun 		dev_warn(dev, "Failed to convert normal interface to tunnel\n");
3991*4882a593Smuzhiyun 		return status;
3992*4882a593Smuzhiyun 	}
3993*4882a593Smuzhiyun 	adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
3994*4882a593Smuzhiyun 
3995*4882a593Smuzhiyun 	status = be_cmd_set_vxlan_port(adapter, ti->port);
3996*4882a593Smuzhiyun 	if (status) {
3997*4882a593Smuzhiyun 		dev_warn(dev, "Failed to add VxLAN port\n");
3998*4882a593Smuzhiyun 		return status;
3999*4882a593Smuzhiyun 	}
4000*4882a593Smuzhiyun 	adapter->vxlan_port = ti->port;
4001*4882a593Smuzhiyun 
4002*4882a593Smuzhiyun 	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4003*4882a593Smuzhiyun 				   NETIF_F_TSO | NETIF_F_TSO6 |
4004*4882a593Smuzhiyun 				   NETIF_F_GSO_UDP_TUNNEL;
4005*4882a593Smuzhiyun 
4006*4882a593Smuzhiyun 	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4007*4882a593Smuzhiyun 		 be16_to_cpu(ti->port));
4008*4882a593Smuzhiyun 	return 0;
4009*4882a593Smuzhiyun }
4010*4882a593Smuzhiyun 
4011*4882a593Smuzhiyun static int be_vxlan_unset_port(struct net_device *netdev, unsigned int table,
4012*4882a593Smuzhiyun 			       unsigned int entry, struct udp_tunnel_info *ti)
4013*4882a593Smuzhiyun {
4014*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
4015*4882a593Smuzhiyun 
4016*4882a593Smuzhiyun 	if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
4017*4882a593Smuzhiyun 		be_cmd_manage_iface(adapter, adapter->if_handle,
4018*4882a593Smuzhiyun 				    OP_CONVERT_TUNNEL_TO_NORMAL);
4019*4882a593Smuzhiyun 
4020*4882a593Smuzhiyun 	if (adapter->vxlan_port)
4021*4882a593Smuzhiyun 		be_cmd_set_vxlan_port(adapter, 0);
4022*4882a593Smuzhiyun 
4023*4882a593Smuzhiyun 	adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
4024*4882a593Smuzhiyun 	adapter->vxlan_port = 0;
4025*4882a593Smuzhiyun 
4026*4882a593Smuzhiyun 	netdev->hw_enc_features = 0;
4027*4882a593Smuzhiyun 	return 0;
4028*4882a593Smuzhiyun }
4029*4882a593Smuzhiyun 
4030*4882a593Smuzhiyun static const struct udp_tunnel_nic_info be_udp_tunnels = {
4031*4882a593Smuzhiyun 	.set_port	= be_vxlan_set_port,
4032*4882a593Smuzhiyun 	.unset_port	= be_vxlan_unset_port,
4033*4882a593Smuzhiyun 	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
4034*4882a593Smuzhiyun 			  UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
4035*4882a593Smuzhiyun 	.tables		= {
4036*4882a593Smuzhiyun 		{ .n_entries = 1, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
4037*4882a593Smuzhiyun 	},
4038*4882a593Smuzhiyun };
4039*4882a593Smuzhiyun 
4040*4882a593Smuzhiyun static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
4041*4882a593Smuzhiyun 				struct be_resources *vft_res)
4042*4882a593Smuzhiyun {
4043*4882a593Smuzhiyun 	struct be_resources res = adapter->pool_res;
4044*4882a593Smuzhiyun 	u32 vf_if_cap_flags = res.vf_if_cap_flags;
4045*4882a593Smuzhiyun 	struct be_resources res_mod = {0};
4046*4882a593Smuzhiyun 	u16 num_vf_qs = 1;
4047*4882a593Smuzhiyun 
4048*4882a593Smuzhiyun 	/* Distribute the queue resources among the PF and its VFs */
4049*4882a593Smuzhiyun 	if (num_vfs) {
4050*4882a593Smuzhiyun 		/* Divide the rx queues evenly among the VFs and the PF, capped
4051*4882a593Smuzhiyun 		 * at VF-EQ-count. Any remainder queues belong to the PF.
4052*4882a593Smuzhiyun 		 */
4053*4882a593Smuzhiyun 		num_vf_qs = min(SH_VF_MAX_NIC_EQS,
4054*4882a593Smuzhiyun 				res.max_rss_qs / (num_vfs + 1));
4055*4882a593Smuzhiyun 
4056*4882a593Smuzhiyun 		/* Skyhawk-R chip supports only MAX_PORT_RSS_TABLES
4057*4882a593Smuzhiyun 		 * RSS Tables per port. Provide RSS on VFs, only if number of
4058*4882a593Smuzhiyun 		 * VFs requested is less than its PF Pool's RSS Tables limit.
4059*4882a593Smuzhiyun 		 */
4060*4882a593Smuzhiyun 		if (num_vfs >= be_max_pf_pool_rss_tables(adapter))
4061*4882a593Smuzhiyun 			num_vf_qs = 1;
4062*4882a593Smuzhiyun 	}
4063*4882a593Smuzhiyun 
4064*4882a593Smuzhiyun 	/* Resource with fields set to all '1's by GET_PROFILE_CONFIG cmd,
4065*4882a593Smuzhiyun 	 * which are modifiable using SET_PROFILE_CONFIG cmd.
4066*4882a593Smuzhiyun 	 */
4067*4882a593Smuzhiyun 	be_cmd_get_profile_config(adapter, &res_mod, NULL, ACTIVE_PROFILE_TYPE,
4068*4882a593Smuzhiyun 				  RESOURCE_MODIFIABLE, 0);
4069*4882a593Smuzhiyun 
4070*4882a593Smuzhiyun 	/* If RSS IFACE capability flags are modifiable for a VF, set the
4071*4882a593Smuzhiyun 	 * capability flag as valid and set RSS and DEFQ_RSS IFACE flags if
4072*4882a593Smuzhiyun 	 * more than 1 RSSQ is available for a VF.
4073*4882a593Smuzhiyun 	 * Otherwise, provision only 1 queue pair for VF.
4074*4882a593Smuzhiyun 	 */
4075*4882a593Smuzhiyun 	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
4076*4882a593Smuzhiyun 		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4077*4882a593Smuzhiyun 		if (num_vf_qs > 1) {
4078*4882a593Smuzhiyun 			vf_if_cap_flags |= BE_IF_FLAGS_RSS;
4079*4882a593Smuzhiyun 			if (res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS)
4080*4882a593Smuzhiyun 				vf_if_cap_flags |= BE_IF_FLAGS_DEFQ_RSS;
4081*4882a593Smuzhiyun 		} else {
4082*4882a593Smuzhiyun 			vf_if_cap_flags &= ~(BE_IF_FLAGS_RSS |
4083*4882a593Smuzhiyun 					     BE_IF_FLAGS_DEFQ_RSS);
4084*4882a593Smuzhiyun 		}
4085*4882a593Smuzhiyun 	} else {
4086*4882a593Smuzhiyun 		num_vf_qs = 1;
4087*4882a593Smuzhiyun 	}
4088*4882a593Smuzhiyun 
4089*4882a593Smuzhiyun 	if (res_mod.vf_if_cap_flags & BE_IF_FLAGS_VLAN_PROMISCUOUS) {
4090*4882a593Smuzhiyun 		vft_res->flags |= BIT(IF_CAPS_FLAGS_VALID_SHIFT);
4091*4882a593Smuzhiyun 		vf_if_cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4092*4882a593Smuzhiyun 	}
4093*4882a593Smuzhiyun 
4094*4882a593Smuzhiyun 	vft_res->vf_if_cap_flags = vf_if_cap_flags;
4095*4882a593Smuzhiyun 	vft_res->max_rx_qs = num_vf_qs;
4096*4882a593Smuzhiyun 	vft_res->max_rss_qs = num_vf_qs;
4097*4882a593Smuzhiyun 	vft_res->max_tx_qs = res.max_tx_qs / (num_vfs + 1);
4098*4882a593Smuzhiyun 	vft_res->max_cq_count = res.max_cq_count / (num_vfs + 1);
4099*4882a593Smuzhiyun 
4100*4882a593Smuzhiyun 	/* Distribute unicast MACs, VLANs, IFACE count and MCCQ count equally
4101*4882a593Smuzhiyun 	 * among the PF and its VFs, if the fields are changeable
4102*4882a593Smuzhiyun 	 */
4103*4882a593Smuzhiyun 	if (res_mod.max_uc_mac == FIELD_MODIFIABLE)
4104*4882a593Smuzhiyun 		vft_res->max_uc_mac = res.max_uc_mac / (num_vfs + 1);
4105*4882a593Smuzhiyun 
4106*4882a593Smuzhiyun 	if (res_mod.max_vlans == FIELD_MODIFIABLE)
4107*4882a593Smuzhiyun 		vft_res->max_vlans = res.max_vlans / (num_vfs + 1);
4108*4882a593Smuzhiyun 
4109*4882a593Smuzhiyun 	if (res_mod.max_iface_count == FIELD_MODIFIABLE)
4110*4882a593Smuzhiyun 		vft_res->max_iface_count = res.max_iface_count / (num_vfs + 1);
4111*4882a593Smuzhiyun 
4112*4882a593Smuzhiyun 	if (res_mod.max_mcc_count == FIELD_MODIFIABLE)
4113*4882a593Smuzhiyun 		vft_res->max_mcc_count = res.max_mcc_count / (num_vfs + 1);
4114*4882a593Smuzhiyun }
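
/* Worked example (editor's addition, hypothetical numbers): with
 * res.max_rss_qs = 40 and num_vfs = 7, the pool is split eight ways, so
 * num_vf_qs = min(SH_VF_MAX_NIC_EQS, 40 / 8) = min(SH_VF_MAX_NIC_EQS, 5).
 * If the requested VF count reaches the PF pool's RSS-table limit,
 * num_vf_qs falls back to 1 and (when those flags are modifiable) the
 * RSS/DEFQ_RSS capability bits are cleared from vf_if_cap_flags, so each
 * VF is provisioned with a single queue pair.
 */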
4115*4882a593Smuzhiyun 
4116*4882a593Smuzhiyun static void be_if_destroy(struct be_adapter *adapter)
4117*4882a593Smuzhiyun {
4118*4882a593Smuzhiyun 	be_cmd_if_destroy(adapter, adapter->if_handle,  0);
4119*4882a593Smuzhiyun 
4120*4882a593Smuzhiyun 	kfree(adapter->pmac_id);
4121*4882a593Smuzhiyun 	adapter->pmac_id = NULL;
4122*4882a593Smuzhiyun 
4123*4882a593Smuzhiyun 	kfree(adapter->mc_list);
4124*4882a593Smuzhiyun 	adapter->mc_list = NULL;
4125*4882a593Smuzhiyun 
4126*4882a593Smuzhiyun 	kfree(adapter->uc_list);
4127*4882a593Smuzhiyun 	adapter->uc_list = NULL;
4128*4882a593Smuzhiyun }
4129*4882a593Smuzhiyun 
4130*4882a593Smuzhiyun static int be_clear(struct be_adapter *adapter)
4131*4882a593Smuzhiyun {
4132*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
4133*4882a593Smuzhiyun 	struct  be_resources vft_res = {0};
4134*4882a593Smuzhiyun 
4135*4882a593Smuzhiyun 	be_cancel_worker(adapter);
4136*4882a593Smuzhiyun 
4137*4882a593Smuzhiyun 	flush_workqueue(be_wq);
4138*4882a593Smuzhiyun 
4139*4882a593Smuzhiyun 	if (sriov_enabled(adapter))
4140*4882a593Smuzhiyun 		be_vf_clear(adapter);
4141*4882a593Smuzhiyun 
4142*4882a593Smuzhiyun 	/* Re-configure FW to distribute resources evenly across max-supported
4143*4882a593Smuzhiyun 	 * number of VFs, only when VFs are not already enabled.
4144*4882a593Smuzhiyun 	 */
4145*4882a593Smuzhiyun 	if (skyhawk_chip(adapter) && be_physfn(adapter) &&
4146*4882a593Smuzhiyun 	    !pci_vfs_assigned(pdev)) {
4147*4882a593Smuzhiyun 		be_calculate_vf_res(adapter,
4148*4882a593Smuzhiyun 				    pci_sriov_get_totalvfs(pdev),
4149*4882a593Smuzhiyun 				    &vft_res);
4150*4882a593Smuzhiyun 		be_cmd_set_sriov_config(adapter, adapter->pool_res,
4151*4882a593Smuzhiyun 					pci_sriov_get_totalvfs(pdev),
4152*4882a593Smuzhiyun 					&vft_res);
4153*4882a593Smuzhiyun 	}
4154*4882a593Smuzhiyun 
4155*4882a593Smuzhiyun 	be_vxlan_unset_port(adapter->netdev, 0, 0, NULL);
4156*4882a593Smuzhiyun 
4157*4882a593Smuzhiyun 	be_if_destroy(adapter);
4158*4882a593Smuzhiyun 
4159*4882a593Smuzhiyun 	be_clear_queues(adapter);
4160*4882a593Smuzhiyun 
4161*4882a593Smuzhiyun 	be_msix_disable(adapter);
4162*4882a593Smuzhiyun 	adapter->flags &= ~BE_FLAGS_SETUP_DONE;
4163*4882a593Smuzhiyun 	return 0;
4164*4882a593Smuzhiyun }
4165*4882a593Smuzhiyun 
4166*4882a593Smuzhiyun static int be_vfs_if_create(struct be_adapter *adapter)
4167*4882a593Smuzhiyun {
4168*4882a593Smuzhiyun 	struct be_resources res = {0};
4169*4882a593Smuzhiyun 	u32 cap_flags, en_flags, vf;
4170*4882a593Smuzhiyun 	struct be_vf_cfg *vf_cfg;
4171*4882a593Smuzhiyun 	int status;
4172*4882a593Smuzhiyun 
4173*4882a593Smuzhiyun 	/* If a FW profile exists, then cap_flags are updated */
4174*4882a593Smuzhiyun 	cap_flags = BE_VF_IF_EN_FLAGS;
4175*4882a593Smuzhiyun 
4176*4882a593Smuzhiyun 	for_all_vfs(adapter, vf_cfg, vf) {
4177*4882a593Smuzhiyun 		if (!BE3_chip(adapter)) {
4178*4882a593Smuzhiyun 			status = be_cmd_get_profile_config(adapter, &res, NULL,
4179*4882a593Smuzhiyun 							   ACTIVE_PROFILE_TYPE,
4180*4882a593Smuzhiyun 							   RESOURCE_LIMITS,
4181*4882a593Smuzhiyun 							   vf + 1);
4182*4882a593Smuzhiyun 			if (!status) {
4183*4882a593Smuzhiyun 				cap_flags = res.if_cap_flags;
4184*4882a593Smuzhiyun 				/* Prevent VFs from enabling VLAN promiscuous
4185*4882a593Smuzhiyun 				 * mode
4186*4882a593Smuzhiyun 				 */
4187*4882a593Smuzhiyun 				cap_flags &= ~BE_IF_FLAGS_VLAN_PROMISCUOUS;
4188*4882a593Smuzhiyun 			}
4189*4882a593Smuzhiyun 		}
4190*4882a593Smuzhiyun 
4191*4882a593Smuzhiyun 		/* PF should enable IF flags during proxy if_create call */
4192*4882a593Smuzhiyun 		en_flags = cap_flags & BE_VF_IF_EN_FLAGS;
4193*4882a593Smuzhiyun 		status = be_cmd_if_create(adapter, cap_flags, en_flags,
4194*4882a593Smuzhiyun 					  &vf_cfg->if_handle, vf + 1);
4195*4882a593Smuzhiyun 		if (status)
4196*4882a593Smuzhiyun 			return status;
4197*4882a593Smuzhiyun 	}
4198*4882a593Smuzhiyun 
4199*4882a593Smuzhiyun 	return 0;
4200*4882a593Smuzhiyun }
4201*4882a593Smuzhiyun 
4202*4882a593Smuzhiyun static int be_vf_setup_init(struct be_adapter *adapter)
4203*4882a593Smuzhiyun {
4204*4882a593Smuzhiyun 	struct be_vf_cfg *vf_cfg;
4205*4882a593Smuzhiyun 	int vf;
4206*4882a593Smuzhiyun 
4207*4882a593Smuzhiyun 	adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
4208*4882a593Smuzhiyun 				  GFP_KERNEL);
4209*4882a593Smuzhiyun 	if (!adapter->vf_cfg)
4210*4882a593Smuzhiyun 		return -ENOMEM;
4211*4882a593Smuzhiyun 
4212*4882a593Smuzhiyun 	for_all_vfs(adapter, vf_cfg, vf) {
4213*4882a593Smuzhiyun 		vf_cfg->if_handle = -1;
4214*4882a593Smuzhiyun 		vf_cfg->pmac_id = -1;
4215*4882a593Smuzhiyun 	}
4216*4882a593Smuzhiyun 	return 0;
4217*4882a593Smuzhiyun }
4218*4882a593Smuzhiyun 
4219*4882a593Smuzhiyun static int be_vf_setup(struct be_adapter *adapter)
4220*4882a593Smuzhiyun {
4221*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
4222*4882a593Smuzhiyun 	struct be_vf_cfg *vf_cfg;
4223*4882a593Smuzhiyun 	int status, old_vfs, vf;
4224*4882a593Smuzhiyun 	bool spoofchk;
4225*4882a593Smuzhiyun 
4226*4882a593Smuzhiyun 	old_vfs = pci_num_vf(adapter->pdev);
4227*4882a593Smuzhiyun 
4228*4882a593Smuzhiyun 	status = be_vf_setup_init(adapter);
4229*4882a593Smuzhiyun 	if (status)
4230*4882a593Smuzhiyun 		goto err;
4231*4882a593Smuzhiyun 
4232*4882a593Smuzhiyun 	if (old_vfs) {
4233*4882a593Smuzhiyun 		for_all_vfs(adapter, vf_cfg, vf) {
4234*4882a593Smuzhiyun 			status = be_cmd_get_if_id(adapter, vf_cfg, vf);
4235*4882a593Smuzhiyun 			if (status)
4236*4882a593Smuzhiyun 				goto err;
4237*4882a593Smuzhiyun 		}
4238*4882a593Smuzhiyun 
4239*4882a593Smuzhiyun 		status = be_vfs_mac_query(adapter);
4240*4882a593Smuzhiyun 		if (status)
4241*4882a593Smuzhiyun 			goto err;
4242*4882a593Smuzhiyun 	} else {
4243*4882a593Smuzhiyun 		status = be_vfs_if_create(adapter);
4244*4882a593Smuzhiyun 		if (status)
4245*4882a593Smuzhiyun 			goto err;
4246*4882a593Smuzhiyun 
4247*4882a593Smuzhiyun 		status = be_vf_eth_addr_config(adapter);
4248*4882a593Smuzhiyun 		if (status)
4249*4882a593Smuzhiyun 			goto err;
4250*4882a593Smuzhiyun 	}
4251*4882a593Smuzhiyun 
4252*4882a593Smuzhiyun 	for_all_vfs(adapter, vf_cfg, vf) {
4253*4882a593Smuzhiyun 		/* Allow VFs to program MAC/VLAN filters */
4254*4882a593Smuzhiyun 		status = be_cmd_get_fn_privileges(adapter, &vf_cfg->privileges,
4255*4882a593Smuzhiyun 						  vf + 1);
4256*4882a593Smuzhiyun 		if (!status && !(vf_cfg->privileges & BE_PRIV_FILTMGMT)) {
4257*4882a593Smuzhiyun 			status = be_cmd_set_fn_privileges(adapter,
4258*4882a593Smuzhiyun 							  vf_cfg->privileges |
4259*4882a593Smuzhiyun 							  BE_PRIV_FILTMGMT,
4260*4882a593Smuzhiyun 							  vf + 1);
4261*4882a593Smuzhiyun 			if (!status) {
4262*4882a593Smuzhiyun 				vf_cfg->privileges |= BE_PRIV_FILTMGMT;
4263*4882a593Smuzhiyun 				dev_info(dev, "VF%d has FILTMGMT privilege\n",
4264*4882a593Smuzhiyun 					 vf);
4265*4882a593Smuzhiyun 			}
4266*4882a593Smuzhiyun 		}
4267*4882a593Smuzhiyun 
4268*4882a593Smuzhiyun 		/* Allow full available bandwidth */
4269*4882a593Smuzhiyun 		if (!old_vfs)
4270*4882a593Smuzhiyun 			be_cmd_config_qos(adapter, 0, 0, vf + 1);
4271*4882a593Smuzhiyun 
4272*4882a593Smuzhiyun 		status = be_cmd_get_hsw_config(adapter, NULL, vf + 1,
4273*4882a593Smuzhiyun 					       vf_cfg->if_handle, NULL,
4274*4882a593Smuzhiyun 					       &spoofchk);
4275*4882a593Smuzhiyun 		if (!status)
4276*4882a593Smuzhiyun 			vf_cfg->spoofchk = spoofchk;
4277*4882a593Smuzhiyun 
4278*4882a593Smuzhiyun 		if (!old_vfs) {
4279*4882a593Smuzhiyun 			be_cmd_enable_vf(adapter, vf + 1);
4280*4882a593Smuzhiyun 			be_cmd_set_logical_link_config(adapter,
4281*4882a593Smuzhiyun 						       IFLA_VF_LINK_STATE_AUTO,
4282*4882a593Smuzhiyun 						       vf+1);
4283*4882a593Smuzhiyun 		}
4284*4882a593Smuzhiyun 	}
4285*4882a593Smuzhiyun 
4286*4882a593Smuzhiyun 	if (!old_vfs) {
4287*4882a593Smuzhiyun 		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
4288*4882a593Smuzhiyun 		if (status) {
4289*4882a593Smuzhiyun 			dev_err(dev, "SRIOV enable failed\n");
4290*4882a593Smuzhiyun 			adapter->num_vfs = 0;
4291*4882a593Smuzhiyun 			goto err;
4292*4882a593Smuzhiyun 		}
4293*4882a593Smuzhiyun 	}
4294*4882a593Smuzhiyun 
4295*4882a593Smuzhiyun 	if (BE3_chip(adapter)) {
4296*4882a593Smuzhiyun 		/* On BE3, enable VEB only when SRIOV is enabled */
4297*4882a593Smuzhiyun 		status = be_cmd_set_hsw_config(adapter, 0, 0,
4298*4882a593Smuzhiyun 					       adapter->if_handle,
4299*4882a593Smuzhiyun 					       PORT_FWD_TYPE_VEB, 0);
4300*4882a593Smuzhiyun 		if (status)
4301*4882a593Smuzhiyun 			goto err;
4302*4882a593Smuzhiyun 	}
4303*4882a593Smuzhiyun 
4304*4882a593Smuzhiyun 	adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
4305*4882a593Smuzhiyun 	return 0;
4306*4882a593Smuzhiyun err:
4307*4882a593Smuzhiyun 	dev_err(dev, "VF setup failed\n");
4308*4882a593Smuzhiyun 	be_vf_clear(adapter);
4309*4882a593Smuzhiyun 	return status;
4310*4882a593Smuzhiyun }
4311*4882a593Smuzhiyun 
4312*4882a593Smuzhiyun /* Converting function_mode bits on BE3 to SH mc_type enums */
4313*4882a593Smuzhiyun 
4314*4882a593Smuzhiyun static u8 be_convert_mc_type(u32 function_mode)
4315*4882a593Smuzhiyun {
4316*4882a593Smuzhiyun 	if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
4317*4882a593Smuzhiyun 		return vNIC1;
4318*4882a593Smuzhiyun 	else if (function_mode & QNQ_MODE)
4319*4882a593Smuzhiyun 		return FLEX10;
4320*4882a593Smuzhiyun 	else if (function_mode & VNIC_MODE)
4321*4882a593Smuzhiyun 		return vNIC2;
4322*4882a593Smuzhiyun 	else if (function_mode & UMC_ENABLED)
4323*4882a593Smuzhiyun 		return UMC;
4324*4882a593Smuzhiyun 	else
4325*4882a593Smuzhiyun 		return MC_NONE;
4326*4882a593Smuzhiyun }
4327*4882a593Smuzhiyun 
4328*4882a593Smuzhiyun /* On BE2/BE3 FW does not suggest the supported limits */
4329*4882a593Smuzhiyun static void BEx_get_resources(struct be_adapter *adapter,
4330*4882a593Smuzhiyun 			      struct be_resources *res)
4331*4882a593Smuzhiyun {
4332*4882a593Smuzhiyun 	bool use_sriov = adapter->num_vfs ? 1 : 0;
4333*4882a593Smuzhiyun 
4334*4882a593Smuzhiyun 	if (be_physfn(adapter))
4335*4882a593Smuzhiyun 		res->max_uc_mac = BE_UC_PMAC_COUNT;
4336*4882a593Smuzhiyun 	else
4337*4882a593Smuzhiyun 		res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
4338*4882a593Smuzhiyun 
4339*4882a593Smuzhiyun 	adapter->mc_type = be_convert_mc_type(adapter->function_mode);
4340*4882a593Smuzhiyun 
4341*4882a593Smuzhiyun 	if (be_is_mc(adapter)) {
4342*4882a593Smuzhiyun 		/* Assuming that there are 4 channels per port,
4343*4882a593Smuzhiyun 		 * when multi-channel is enabled
4344*4882a593Smuzhiyun 		 */
4345*4882a593Smuzhiyun 		if (be_is_qnq_mode(adapter))
4346*4882a593Smuzhiyun 			res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
4347*4882a593Smuzhiyun 		else
4348*4882a593Smuzhiyun 			/* In a non-qnq multichannel mode, the pvid
4349*4882a593Smuzhiyun 			 * takes up one vlan entry
4350*4882a593Smuzhiyun 			 */
4351*4882a593Smuzhiyun 			res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
4352*4882a593Smuzhiyun 	} else {
4353*4882a593Smuzhiyun 		res->max_vlans = BE_NUM_VLANS_SUPPORTED;
4354*4882a593Smuzhiyun 	}
4355*4882a593Smuzhiyun 
4356*4882a593Smuzhiyun 	res->max_mcast_mac = BE_MAX_MC;
4357*4882a593Smuzhiyun 
4358*4882a593Smuzhiyun 	/* 1) For BE3 1Gb ports, FW does not support multiple TXQs
4359*4882a593Smuzhiyun 	 * 2) Create multiple TX rings on a BE3-R multi-channel interface
4360*4882a593Smuzhiyun 	 *    *only* if it is RSS-capable.
4361*4882a593Smuzhiyun 	 */
4362*4882a593Smuzhiyun 	if (BE2_chip(adapter) || use_sriov ||  (adapter->port_num > 1) ||
4363*4882a593Smuzhiyun 	    be_virtfn(adapter) ||
4364*4882a593Smuzhiyun 	    (be_is_mc(adapter) &&
4365*4882a593Smuzhiyun 	     !(adapter->function_caps & BE_FUNCTION_CAPS_RSS))) {
4366*4882a593Smuzhiyun 		res->max_tx_qs = 1;
4367*4882a593Smuzhiyun 	} else if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) {
4368*4882a593Smuzhiyun 		struct be_resources super_nic_res = {0};
4369*4882a593Smuzhiyun 
4370*4882a593Smuzhiyun 		/* On a SuperNIC profile, the driver needs to use the
4371*4882a593Smuzhiyun 		 * GET_PROFILE_CONFIG cmd to query the per-function TXQ limits
4372*4882a593Smuzhiyun 		 */
4373*4882a593Smuzhiyun 		be_cmd_get_profile_config(adapter, &super_nic_res, NULL,
4374*4882a593Smuzhiyun 					  ACTIVE_PROFILE_TYPE, RESOURCE_LIMITS,
4375*4882a593Smuzhiyun 					  0);
4376*4882a593Smuzhiyun 		/* Some old versions of BE3 FW don't report max_tx_qs value */
4377*4882a593Smuzhiyun 		res->max_tx_qs = super_nic_res.max_tx_qs ? : BE3_MAX_TX_QS;
4378*4882a593Smuzhiyun 	} else {
4379*4882a593Smuzhiyun 		res->max_tx_qs = BE3_MAX_TX_QS;
4380*4882a593Smuzhiyun 	}
4381*4882a593Smuzhiyun 
4382*4882a593Smuzhiyun 	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
4383*4882a593Smuzhiyun 	    !use_sriov && be_physfn(adapter))
4384*4882a593Smuzhiyun 		res->max_rss_qs = (adapter->be3_native) ?
4385*4882a593Smuzhiyun 					   BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
4386*4882a593Smuzhiyun 	res->max_rx_qs = res->max_rss_qs + 1;
4387*4882a593Smuzhiyun 
4388*4882a593Smuzhiyun 	if (be_physfn(adapter))
4389*4882a593Smuzhiyun 		res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
4390*4882a593Smuzhiyun 					BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
4391*4882a593Smuzhiyun 	else
4392*4882a593Smuzhiyun 		res->max_evt_qs = 1;
4393*4882a593Smuzhiyun 
4394*4882a593Smuzhiyun 	res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
4395*4882a593Smuzhiyun 	res->if_cap_flags &= ~BE_IF_FLAGS_DEFQ_RSS;
4396*4882a593Smuzhiyun 	if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
4397*4882a593Smuzhiyun 		res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
4398*4882a593Smuzhiyun }
4399*4882a593Smuzhiyun 
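/* Reset per-adapter soft state to sane defaults before FW is (re)queried */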
4400*4882a593Smuzhiyun static void be_setup_init(struct be_adapter *adapter)
4401*4882a593Smuzhiyun {
4402*4882a593Smuzhiyun 	adapter->vlan_prio_bmap = 0xff;
4403*4882a593Smuzhiyun 	adapter->phy.link_speed = -1;
4404*4882a593Smuzhiyun 	adapter->if_handle = -1;
4405*4882a593Smuzhiyun 	adapter->be3_native = false;
4406*4882a593Smuzhiyun 	adapter->if_flags = 0;
4407*4882a593Smuzhiyun 	adapter->phy_state = BE_UNKNOWN_PHY_STATE;
4408*4882a593Smuzhiyun 	if (be_physfn(adapter))
4409*4882a593Smuzhiyun 		adapter->cmd_privileges = MAX_PRIVILEGES;
4410*4882a593Smuzhiyun 	else
4411*4882a593Smuzhiyun 		adapter->cmd_privileges = MIN_PRIVILEGES;
4412*4882a593Smuzhiyun }
4413*4882a593Smuzhiyun 
4414*4882a593Smuzhiyun /* HW supports only MAX_PORT_RSS_TABLES RSS Policy Tables per port.
4415*4882a593Smuzhiyun  * However, this HW limitation is not exposed to the host via any SLI cmd.
4416*4882a593Smuzhiyun  * As a result, in the case of SRIOV and in particular multi-partition configs
4417*4882a593Smuzhiyun  * the driver needs to calculate a proportional share of RSS Tables per PF-pool
4418*4882a593Smuzhiyun  * for distribution between the VFs. This self-imposed limit will determine the
4419*4882a593Smuzhiyun  * number of VFs for which RSS can be enabled.
4420*4882a593Smuzhiyun  */
4421*4882a593Smuzhiyun static void be_calculate_pf_pool_rss_tables(struct be_adapter *adapter)
4422*4882a593Smuzhiyun {
4423*4882a593Smuzhiyun 	struct be_port_resources port_res = {0};
4424*4882a593Smuzhiyun 	u8 rss_tables_on_port;
4425*4882a593Smuzhiyun 	u16 max_vfs = be_max_vfs(adapter);
4426*4882a593Smuzhiyun 
4427*4882a593Smuzhiyun 	be_cmd_get_profile_config(adapter, NULL, &port_res, SAVED_PROFILE_TYPE,
4428*4882a593Smuzhiyun 				  RESOURCE_LIMITS, 0);
4429*4882a593Smuzhiyun 
4430*4882a593Smuzhiyun 	rss_tables_on_port = MAX_PORT_RSS_TABLES - port_res.nic_pfs;
4431*4882a593Smuzhiyun 
4432*4882a593Smuzhiyun 	/* Each PF Pool's RSS Tables limit =
4433*4882a593Smuzhiyun 	 * PF's Max VFs / Total_Max_VFs on Port * RSS Tables on Port
4434*4882a593Smuzhiyun 	 */
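	/* Illustrative numbers only: if 12 RSS tables remain on the port after
	 * accounting for the NIC PFs, and this PF owns half of the port's
	 * max_vfs, its pool is capped at 6 RSS tables, i.e. at most 6 of its
	 * VFs can have RSS enabled.
	 */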
4435*4882a593Smuzhiyun 	adapter->pool_res.max_rss_tables =
4436*4882a593Smuzhiyun 		max_vfs * rss_tables_on_port / port_res.max_vfs;
4437*4882a593Smuzhiyun }
4438*4882a593Smuzhiyun 
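/* Read the PF-pool SR-IOV limits from FW. If VFs were left enabled by a
 * previous driver load, trust the TotalVFs value cached in the pci_dev
 * instead of the PF-pool limits.
 */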
4439*4882a593Smuzhiyun static int be_get_sriov_config(struct be_adapter *adapter)
4440*4882a593Smuzhiyun {
4441*4882a593Smuzhiyun 	struct be_resources res = {0};
4442*4882a593Smuzhiyun 	int max_vfs, old_vfs;
4443*4882a593Smuzhiyun 
4444*4882a593Smuzhiyun 	be_cmd_get_profile_config(adapter, &res, NULL, ACTIVE_PROFILE_TYPE,
4445*4882a593Smuzhiyun 				  RESOURCE_LIMITS, 0);
4446*4882a593Smuzhiyun 
4447*4882a593Smuzhiyun 	/* Some old versions of BE3 FW don't report max_vfs value */
4448*4882a593Smuzhiyun 	if (BE3_chip(adapter) && !res.max_vfs) {
4449*4882a593Smuzhiyun 		max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
4450*4882a593Smuzhiyun 		res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
4451*4882a593Smuzhiyun 	}
4452*4882a593Smuzhiyun 
4453*4882a593Smuzhiyun 	adapter->pool_res = res;
4454*4882a593Smuzhiyun 
4455*4882a593Smuzhiyun 	/* If during previous unload of the driver, the VFs were not disabled,
4456*4882a593Smuzhiyun 	 * then we cannot rely on the PF POOL limits for the TotalVFs value.
4457*4882a593Smuzhiyun 	 * Instead use the TotalVFs value stored in the pci-dev struct.
4458*4882a593Smuzhiyun 	 */
4459*4882a593Smuzhiyun 	old_vfs = pci_num_vf(adapter->pdev);
4460*4882a593Smuzhiyun 	if (old_vfs) {
4461*4882a593Smuzhiyun 		dev_info(&adapter->pdev->dev, "%d VFs are already enabled\n",
4462*4882a593Smuzhiyun 			 old_vfs);
4463*4882a593Smuzhiyun 
4464*4882a593Smuzhiyun 		adapter->pool_res.max_vfs =
4465*4882a593Smuzhiyun 			pci_sriov_get_totalvfs(adapter->pdev);
4466*4882a593Smuzhiyun 		adapter->num_vfs = old_vfs;
4467*4882a593Smuzhiyun 	}
4468*4882a593Smuzhiyun 
4469*4882a593Smuzhiyun 	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4470*4882a593Smuzhiyun 		be_calculate_pf_pool_rss_tables(adapter);
4471*4882a593Smuzhiyun 		dev_info(&adapter->pdev->dev,
4472*4882a593Smuzhiyun 			 "RSS can be enabled for all VFs if num_vfs <= %d\n",
4473*4882a593Smuzhiyun 			 be_max_pf_pool_rss_tables(adapter));
4474*4882a593Smuzhiyun 	}
4475*4882a593Smuzhiyun 	return 0;
4476*4882a593Smuzhiyun }
4477*4882a593Smuzhiyun 
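/* Query the SR-IOV limits and, on Skyhawk, re-distribute the PF-pool
 * resources to this PF when no VFs are enabled yet.
 */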
4478*4882a593Smuzhiyun static void be_alloc_sriov_res(struct be_adapter *adapter)
4479*4882a593Smuzhiyun {
4480*4882a593Smuzhiyun 	int old_vfs = pci_num_vf(adapter->pdev);
4481*4882a593Smuzhiyun 	struct  be_resources vft_res = {0};
4482*4882a593Smuzhiyun 	int status;
4483*4882a593Smuzhiyun 
4484*4882a593Smuzhiyun 	be_get_sriov_config(adapter);
4485*4882a593Smuzhiyun 
4486*4882a593Smuzhiyun 	if (!old_vfs)
4487*4882a593Smuzhiyun 		pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
4488*4882a593Smuzhiyun 
4489*4882a593Smuzhiyun 	/* When the HW is in SRIOV capable configuration, the PF-pool
4490*4882a593Smuzhiyun 	 * resources are given to PF during driver load, if there are no
4491*4882a593Smuzhiyun 	 * old VFs. This facility is not available in BE3 FW.
4492*4882a593Smuzhiyun 	 * Also, this is done by FW in Lancer chip.
4493*4882a593Smuzhiyun 	 */
4494*4882a593Smuzhiyun 	if (skyhawk_chip(adapter) && be_max_vfs(adapter) && !old_vfs) {
4495*4882a593Smuzhiyun 		be_calculate_vf_res(adapter, 0, &vft_res);
4496*4882a593Smuzhiyun 		status = be_cmd_set_sriov_config(adapter, adapter->pool_res, 0,
4497*4882a593Smuzhiyun 						 &vft_res);
4498*4882a593Smuzhiyun 		if (status)
4499*4882a593Smuzhiyun 			dev_err(&adapter->pdev->dev,
4500*4882a593Smuzhiyun 				"Failed to optimize SRIOV resources\n");
4501*4882a593Smuzhiyun 	}
4502*4882a593Smuzhiyun }
4503*4882a593Smuzhiyun 
4504*4882a593Smuzhiyun static int be_get_resources(struct be_adapter *adapter)
4505*4882a593Smuzhiyun {
4506*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
4507*4882a593Smuzhiyun 	struct be_resources res = {0};
4508*4882a593Smuzhiyun 	int status;
4509*4882a593Smuzhiyun 
4510*4882a593Smuzhiyun 	/* For Lancer, SH etc read per-function resource limits from FW.
4511*4882a593Smuzhiyun 	 * GET_FUNC_CONFIG returns per function guaranteed limits.
4512*4882a593Smuzhiyun 	 * GET_PROFILE_CONFIG returns PCI-E related (PF-pool) limits.
4513*4882a593Smuzhiyun 	 */
4514*4882a593Smuzhiyun 	if (BEx_chip(adapter)) {
4515*4882a593Smuzhiyun 		BEx_get_resources(adapter, &res);
4516*4882a593Smuzhiyun 	} else {
4517*4882a593Smuzhiyun 		status = be_cmd_get_func_config(adapter, &res);
4518*4882a593Smuzhiyun 		if (status)
4519*4882a593Smuzhiyun 			return status;
4520*4882a593Smuzhiyun 
4521*4882a593Smuzhiyun 		/* If a default RXQ must be created, we'll use up one RSSQ */
4522*4882a593Smuzhiyun 		if (res.max_rss_qs && res.max_rss_qs == res.max_rx_qs &&
4523*4882a593Smuzhiyun 		    !(res.if_cap_flags & BE_IF_FLAGS_DEFQ_RSS))
4524*4882a593Smuzhiyun 			res.max_rss_qs -= 1;
4525*4882a593Smuzhiyun 	}
4526*4882a593Smuzhiyun 
4527*4882a593Smuzhiyun 	/* If RoCE is supported stash away half the EQs for RoCE */
4528*4882a593Smuzhiyun 	res.max_nic_evt_qs = be_roce_supported(adapter) ?
4529*4882a593Smuzhiyun 				res.max_evt_qs / 2 : res.max_evt_qs;
4530*4882a593Smuzhiyun 	adapter->res = res;
4531*4882a593Smuzhiyun 
4532*4882a593Smuzhiyun 	/* If FW supports RSS default queue, then skip creating non-RSS
4533*4882a593Smuzhiyun 	 * queue for non-IP traffic.
4534*4882a593Smuzhiyun 	 */
4535*4882a593Smuzhiyun 	adapter->need_def_rxq = (be_if_cap_flags(adapter) &
4536*4882a593Smuzhiyun 				 BE_IF_FLAGS_DEFQ_RSS) ? 0 : 1;
4537*4882a593Smuzhiyun 
4538*4882a593Smuzhiyun 	dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
4539*4882a593Smuzhiyun 		 be_max_txqs(adapter), be_max_rxqs(adapter),
4540*4882a593Smuzhiyun 		 be_max_rss(adapter), be_max_nic_eqs(adapter),
4541*4882a593Smuzhiyun 		 be_max_vfs(adapter));
4542*4882a593Smuzhiyun 	dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
4543*4882a593Smuzhiyun 		 be_max_uc(adapter), be_max_mc(adapter),
4544*4882a593Smuzhiyun 		 be_max_vlans(adapter));
4545*4882a593Smuzhiyun 
4546*4882a593Smuzhiyun 	/* Ensure RX and TX queues are created in pairs at init time */
4547*4882a593Smuzhiyun 	adapter->cfg_num_rx_irqs =
4548*4882a593Smuzhiyun 				min_t(u16, netif_get_num_default_rss_queues(),
4549*4882a593Smuzhiyun 				      be_max_qp_irqs(adapter));
4550*4882a593Smuzhiyun 	adapter->cfg_num_tx_irqs = adapter->cfg_num_rx_irqs;
4551*4882a593Smuzhiyun 	return 0;
4552*4882a593Smuzhiyun }
4553*4882a593Smuzhiyun 
4554*4882a593Smuzhiyun static int be_get_config(struct be_adapter *adapter)
4555*4882a593Smuzhiyun {
4556*4882a593Smuzhiyun 	int status, level;
4557*4882a593Smuzhiyun 	u16 profile_id;
4558*4882a593Smuzhiyun 
4559*4882a593Smuzhiyun 	status = be_cmd_get_cntl_attributes(adapter);
4560*4882a593Smuzhiyun 	if (status)
4561*4882a593Smuzhiyun 		return status;
4562*4882a593Smuzhiyun 
4563*4882a593Smuzhiyun 	status = be_cmd_query_fw_cfg(adapter);
4564*4882a593Smuzhiyun 	if (status)
4565*4882a593Smuzhiyun 		return status;
4566*4882a593Smuzhiyun 
4567*4882a593Smuzhiyun 	if (!lancer_chip(adapter) && be_physfn(adapter))
4568*4882a593Smuzhiyun 		be_cmd_get_fat_dump_len(adapter, &adapter->fat_dump_len);
4569*4882a593Smuzhiyun 
4570*4882a593Smuzhiyun 	if (BEx_chip(adapter)) {
4571*4882a593Smuzhiyun 		level = be_cmd_get_fw_log_level(adapter);
4572*4882a593Smuzhiyun 		adapter->msg_enable =
4573*4882a593Smuzhiyun 			level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4574*4882a593Smuzhiyun 	}
4575*4882a593Smuzhiyun 
4576*4882a593Smuzhiyun 	be_cmd_get_acpi_wol_cap(adapter);
4577*4882a593Smuzhiyun 	pci_enable_wake(adapter->pdev, PCI_D3hot, adapter->wol_en);
4578*4882a593Smuzhiyun 	pci_enable_wake(adapter->pdev, PCI_D3cold, adapter->wol_en);
4579*4882a593Smuzhiyun 
4580*4882a593Smuzhiyun 	be_cmd_query_port_name(adapter);
4581*4882a593Smuzhiyun 
4582*4882a593Smuzhiyun 	if (be_physfn(adapter)) {
4583*4882a593Smuzhiyun 		status = be_cmd_get_active_profile(adapter, &profile_id);
4584*4882a593Smuzhiyun 		if (!status)
4585*4882a593Smuzhiyun 			dev_info(&adapter->pdev->dev,
4586*4882a593Smuzhiyun 				 "Using profile 0x%x\n", profile_id);
4587*4882a593Smuzhiyun 	}
4588*4882a593Smuzhiyun 
4589*4882a593Smuzhiyun 	return 0;
4590*4882a593Smuzhiyun }
4591*4882a593Smuzhiyun 
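/* If no MAC address has been assigned yet, fetch the permanent MAC from FW
 * and use it as both dev_addr and perm_addr.
 */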
4592*4882a593Smuzhiyun static int be_mac_setup(struct be_adapter *adapter)
4593*4882a593Smuzhiyun {
4594*4882a593Smuzhiyun 	u8 mac[ETH_ALEN];
4595*4882a593Smuzhiyun 	int status;
4596*4882a593Smuzhiyun 
4597*4882a593Smuzhiyun 	if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
4598*4882a593Smuzhiyun 		status = be_cmd_get_perm_mac(adapter, mac);
4599*4882a593Smuzhiyun 		if (status)
4600*4882a593Smuzhiyun 			return status;
4601*4882a593Smuzhiyun 
4602*4882a593Smuzhiyun 		memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4603*4882a593Smuzhiyun 		memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4604*4882a593Smuzhiyun 
4605*4882a593Smuzhiyun 		/* Initial MAC for BE3 VFs is already programmed by PF */
4606*4882a593Smuzhiyun 		if (BEx_chip(adapter) && be_virtfn(adapter))
4607*4882a593Smuzhiyun 			memcpy(adapter->dev_mac, mac, ETH_ALEN);
4608*4882a593Smuzhiyun 	}
4609*4882a593Smuzhiyun 
4610*4882a593Smuzhiyun 	return 0;
4611*4882a593Smuzhiyun }
4612*4882a593Smuzhiyun 
4613*4882a593Smuzhiyun static void be_schedule_worker(struct be_adapter *adapter)
4614*4882a593Smuzhiyun {
4615*4882a593Smuzhiyun 	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
4616*4882a593Smuzhiyun 	adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
4617*4882a593Smuzhiyun }
4618*4882a593Smuzhiyun 
4619*4882a593Smuzhiyun static void be_destroy_err_recovery_workq(void)
4620*4882a593Smuzhiyun {
4621*4882a593Smuzhiyun 	if (!be_err_recovery_workq)
4622*4882a593Smuzhiyun 		return;
4623*4882a593Smuzhiyun 
4624*4882a593Smuzhiyun 	flush_workqueue(be_err_recovery_workq);
4625*4882a593Smuzhiyun 	destroy_workqueue(be_err_recovery_workq);
4626*4882a593Smuzhiyun 	be_err_recovery_workq = NULL;
4627*4882a593Smuzhiyun }
4628*4882a593Smuzhiyun 
4629*4882a593Smuzhiyun static void be_schedule_err_detection(struct be_adapter *adapter, u32 delay)
4630*4882a593Smuzhiyun {
4631*4882a593Smuzhiyun 	struct be_error_recovery *err_rec = &adapter->error_recovery;
4632*4882a593Smuzhiyun 
4633*4882a593Smuzhiyun 	if (!be_err_recovery_workq)
4634*4882a593Smuzhiyun 		return;
4635*4882a593Smuzhiyun 
4636*4882a593Smuzhiyun 	queue_delayed_work(be_err_recovery_workq, &err_rec->err_detection_work,
4637*4882a593Smuzhiyun 			   msecs_to_jiffies(delay));
4638*4882a593Smuzhiyun 	adapter->flags |= BE_FLAGS_ERR_DETECTION_SCHEDULED;
4639*4882a593Smuzhiyun }
4640*4882a593Smuzhiyun 
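/* Create the event, TX, RX-CQ and MCC queues and publish the resulting
 * queue counts to the network stack.
 */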
4641*4882a593Smuzhiyun static int be_setup_queues(struct be_adapter *adapter)
4642*4882a593Smuzhiyun {
4643*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
4644*4882a593Smuzhiyun 	int status;
4645*4882a593Smuzhiyun 
4646*4882a593Smuzhiyun 	status = be_evt_queues_create(adapter);
4647*4882a593Smuzhiyun 	if (status)
4648*4882a593Smuzhiyun 		goto err;
4649*4882a593Smuzhiyun 
4650*4882a593Smuzhiyun 	status = be_tx_qs_create(adapter);
4651*4882a593Smuzhiyun 	if (status)
4652*4882a593Smuzhiyun 		goto err;
4653*4882a593Smuzhiyun 
4654*4882a593Smuzhiyun 	status = be_rx_cqs_create(adapter);
4655*4882a593Smuzhiyun 	if (status)
4656*4882a593Smuzhiyun 		goto err;
4657*4882a593Smuzhiyun 
4658*4882a593Smuzhiyun 	status = be_mcc_queues_create(adapter);
4659*4882a593Smuzhiyun 	if (status)
4660*4882a593Smuzhiyun 		goto err;
4661*4882a593Smuzhiyun 
4662*4882a593Smuzhiyun 	status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
4663*4882a593Smuzhiyun 	if (status)
4664*4882a593Smuzhiyun 		goto err;
4665*4882a593Smuzhiyun 
4666*4882a593Smuzhiyun 	status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
4667*4882a593Smuzhiyun 	if (status)
4668*4882a593Smuzhiyun 		goto err;
4669*4882a593Smuzhiyun 
4670*4882a593Smuzhiyun 	return 0;
4671*4882a593Smuzhiyun err:
4672*4882a593Smuzhiyun 	dev_err(&adapter->pdev->dev, "queue_setup failed\n");
4673*4882a593Smuzhiyun 	return status;
4674*4882a593Smuzhiyun }
4675*4882a593Smuzhiyun 
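/* Allocate the uc/mc/pmac filter bookkeeping arrays and create the FW
 * interface (if_handle); RSS capability flags are dropped when only a
 * single RX IRQ is configured.
 */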
4676*4882a593Smuzhiyun static int be_if_create(struct be_adapter *adapter)
4677*4882a593Smuzhiyun {
4678*4882a593Smuzhiyun 	u32 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4679*4882a593Smuzhiyun 	u32 cap_flags = be_if_cap_flags(adapter);
4680*4882a593Smuzhiyun 	int status;
4681*4882a593Smuzhiyun 
4682*4882a593Smuzhiyun 	/* alloc required memory for other filtering fields */
4683*4882a593Smuzhiyun 	adapter->pmac_id = kcalloc(be_max_uc(adapter),
4684*4882a593Smuzhiyun 				   sizeof(*adapter->pmac_id), GFP_KERNEL);
4685*4882a593Smuzhiyun 	if (!adapter->pmac_id)
4686*4882a593Smuzhiyun 		return -ENOMEM;
4687*4882a593Smuzhiyun 
4688*4882a593Smuzhiyun 	adapter->mc_list = kcalloc(be_max_mc(adapter),
4689*4882a593Smuzhiyun 				   sizeof(*adapter->mc_list), GFP_KERNEL);
4690*4882a593Smuzhiyun 	if (!adapter->mc_list)
4691*4882a593Smuzhiyun 		return -ENOMEM;
4692*4882a593Smuzhiyun 
4693*4882a593Smuzhiyun 	adapter->uc_list = kcalloc(be_max_uc(adapter),
4694*4882a593Smuzhiyun 				   sizeof(*adapter->uc_list), GFP_KERNEL);
4695*4882a593Smuzhiyun 	if (!adapter->uc_list)
4696*4882a593Smuzhiyun 		return -ENOMEM;
4697*4882a593Smuzhiyun 
4698*4882a593Smuzhiyun 	if (adapter->cfg_num_rx_irqs == 1)
4699*4882a593Smuzhiyun 		cap_flags &= ~(BE_IF_FLAGS_DEFQ_RSS | BE_IF_FLAGS_RSS);
4700*4882a593Smuzhiyun 
4701*4882a593Smuzhiyun 	en_flags &= cap_flags;
4702*4882a593Smuzhiyun 	/* will enable all the needed filter flags in be_open() */
4703*4882a593Smuzhiyun 	status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4704*4882a593Smuzhiyun 				  &adapter->if_handle, 0);
4705*4882a593Smuzhiyun 
4706*4882a593Smuzhiyun 	if (status)
4707*4882a593Smuzhiyun 		return status;
4708*4882a593Smuzhiyun 
4709*4882a593Smuzhiyun 	return 0;
4710*4882a593Smuzhiyun }
4711*4882a593Smuzhiyun 
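/* Tear down and re-create the interface and all queues with the current
 * resource configuration, re-opening the netdev if it was running.
 */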
4712*4882a593Smuzhiyun int be_update_queues(struct be_adapter *adapter)
4713*4882a593Smuzhiyun {
4714*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
4715*4882a593Smuzhiyun 	int status;
4716*4882a593Smuzhiyun 
4717*4882a593Smuzhiyun 	if (netif_running(netdev)) {
4718*4882a593Smuzhiyun 		/* be_tx_timeout() must not run concurrently with this
4719*4882a593Smuzhiyun 		 * function, synchronize with an already-running dev_watchdog
4720*4882a593Smuzhiyun 		 */
4721*4882a593Smuzhiyun 		netif_tx_lock_bh(netdev);
4722*4882a593Smuzhiyun 		/* device cannot transmit now, avoid dev_watchdog timeouts */
4723*4882a593Smuzhiyun 		netif_carrier_off(netdev);
4724*4882a593Smuzhiyun 		netif_tx_unlock_bh(netdev);
4725*4882a593Smuzhiyun 
4726*4882a593Smuzhiyun 		be_close(netdev);
4727*4882a593Smuzhiyun 	}
4728*4882a593Smuzhiyun 
4729*4882a593Smuzhiyun 	be_cancel_worker(adapter);
4730*4882a593Smuzhiyun 
4731*4882a593Smuzhiyun 	/* If any vectors have been shared with RoCE we cannot re-program
4732*4882a593Smuzhiyun 	 * the MSIx table.
4733*4882a593Smuzhiyun 	 */
4734*4882a593Smuzhiyun 	if (!adapter->num_msix_roce_vec)
4735*4882a593Smuzhiyun 		be_msix_disable(adapter);
4736*4882a593Smuzhiyun 
4737*4882a593Smuzhiyun 	be_clear_queues(adapter);
4738*4882a593Smuzhiyun 	status = be_cmd_if_destroy(adapter, adapter->if_handle,  0);
4739*4882a593Smuzhiyun 	if (status)
4740*4882a593Smuzhiyun 		return status;
4741*4882a593Smuzhiyun 
4742*4882a593Smuzhiyun 	if (!msix_enabled(adapter)) {
4743*4882a593Smuzhiyun 		status = be_msix_enable(adapter);
4744*4882a593Smuzhiyun 		if (status)
4745*4882a593Smuzhiyun 			return status;
4746*4882a593Smuzhiyun 	}
4747*4882a593Smuzhiyun 
4748*4882a593Smuzhiyun 	status = be_if_create(adapter);
4749*4882a593Smuzhiyun 	if (status)
4750*4882a593Smuzhiyun 		return status;
4751*4882a593Smuzhiyun 
4752*4882a593Smuzhiyun 	status = be_setup_queues(adapter);
4753*4882a593Smuzhiyun 	if (status)
4754*4882a593Smuzhiyun 		return status;
4755*4882a593Smuzhiyun 
4756*4882a593Smuzhiyun 	be_schedule_worker(adapter);
4757*4882a593Smuzhiyun 
4758*4882a593Smuzhiyun 	/* The IF was destroyed and re-created. We need to clear
4759*4882a593Smuzhiyun 	 * all promiscuous flags valid for the destroyed IF.
4760*4882a593Smuzhiyun 	 * Without this, promisc mode is not restored during
4761*4882a593Smuzhiyun 	 * be_open() because the driver thinks that it is
4762*4882a593Smuzhiyun 	 * already enabled in HW.
4763*4882a593Smuzhiyun 	 */
4764*4882a593Smuzhiyun 	adapter->if_flags &= ~BE_IF_FLAGS_ALL_PROMISCUOUS;
4765*4882a593Smuzhiyun 
4766*4882a593Smuzhiyun 	if (netif_running(netdev))
4767*4882a593Smuzhiyun 		status = be_open(netdev);
4768*4882a593Smuzhiyun 
4769*4882a593Smuzhiyun 	return status;
4770*4882a593Smuzhiyun }
4771*4882a593Smuzhiyun 
4772*4882a593Smuzhiyun static inline int fw_major_num(const char *fw_ver)
4773*4882a593Smuzhiyun {
4774*4882a593Smuzhiyun 	int fw_major = 0, i;
4775*4882a593Smuzhiyun 
4776*4882a593Smuzhiyun 	i = sscanf(fw_ver, "%d.", &fw_major);
4777*4882a593Smuzhiyun 	if (i != 1)
4778*4882a593Smuzhiyun 		return 0;
4779*4882a593Smuzhiyun 
4780*4882a593Smuzhiyun 	return fw_major;
4781*4882a593Smuzhiyun }
4782*4882a593Smuzhiyun 
4783*4882a593Smuzhiyun /* If it is error recovery, FLR the PF
4784*4882a593Smuzhiyun  * Else if any VFs are already enabled don't FLR the PF
4785*4882a593Smuzhiyun  */
4786*4882a593Smuzhiyun static bool be_reset_required(struct be_adapter *adapter)
4787*4882a593Smuzhiyun {
4788*4882a593Smuzhiyun 	if (be_error_recovering(adapter))
4789*4882a593Smuzhiyun 		return true;
4790*4882a593Smuzhiyun 	else
4791*4882a593Smuzhiyun 		return pci_num_vf(adapter->pdev) == 0;
4792*4882a593Smuzhiyun }
4793*4882a593Smuzhiyun 
4794*4882a593Smuzhiyun /* Wait for the FW to be ready and perform the required initialization */
4795*4882a593Smuzhiyun static int be_func_init(struct be_adapter *adapter)
4796*4882a593Smuzhiyun {
4797*4882a593Smuzhiyun 	int status;
4798*4882a593Smuzhiyun 
4799*4882a593Smuzhiyun 	status = be_fw_wait_ready(adapter);
4800*4882a593Smuzhiyun 	if (status)
4801*4882a593Smuzhiyun 		return status;
4802*4882a593Smuzhiyun 
4803*4882a593Smuzhiyun 	/* FW is now ready; clear errors to allow cmds/doorbell */
4804*4882a593Smuzhiyun 	be_clear_error(adapter, BE_CLEAR_ALL);
4805*4882a593Smuzhiyun 
4806*4882a593Smuzhiyun 	if (be_reset_required(adapter)) {
4807*4882a593Smuzhiyun 		status = be_cmd_reset_function(adapter);
4808*4882a593Smuzhiyun 		if (status)
4809*4882a593Smuzhiyun 			return status;
4810*4882a593Smuzhiyun 
4811*4882a593Smuzhiyun 		/* Wait for interrupts to quiesce after an FLR */
4812*4882a593Smuzhiyun 		msleep(100);
4813*4882a593Smuzhiyun 	}
4814*4882a593Smuzhiyun 
4815*4882a593Smuzhiyun 	/* Tell FW we're ready to fire cmds */
4816*4882a593Smuzhiyun 	status = be_cmd_fw_init(adapter);
4817*4882a593Smuzhiyun 	if (status)
4818*4882a593Smuzhiyun 		return status;
4819*4882a593Smuzhiyun 
4820*4882a593Smuzhiyun 	/* Allow interrupts for other ULPs running on NIC function */
4821*4882a593Smuzhiyun 	be_intr_set(adapter, true);
4822*4882a593Smuzhiyun 
4823*4882a593Smuzhiyun 	return 0;
4824*4882a593Smuzhiyun }
4825*4882a593Smuzhiyun 
4826*4882a593Smuzhiyun static int be_setup(struct be_adapter *adapter)
4827*4882a593Smuzhiyun {
4828*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
4829*4882a593Smuzhiyun 	int status;
4830*4882a593Smuzhiyun 
4831*4882a593Smuzhiyun 	status = be_func_init(adapter);
4832*4882a593Smuzhiyun 	if (status)
4833*4882a593Smuzhiyun 		return status;
4834*4882a593Smuzhiyun 
4835*4882a593Smuzhiyun 	be_setup_init(adapter);
4836*4882a593Smuzhiyun 
4837*4882a593Smuzhiyun 	if (!lancer_chip(adapter))
4838*4882a593Smuzhiyun 		be_cmd_req_native_mode(adapter);
4839*4882a593Smuzhiyun 
4840*4882a593Smuzhiyun 	/* invoke this cmd first to get pf_num and vf_num which are needed
4841*4882a593Smuzhiyun 	 * for issuing profile related cmds
4842*4882a593Smuzhiyun 	 */
4843*4882a593Smuzhiyun 	if (!BEx_chip(adapter)) {
4844*4882a593Smuzhiyun 		status = be_cmd_get_func_config(adapter, NULL);
4845*4882a593Smuzhiyun 		if (status)
4846*4882a593Smuzhiyun 			return status;
4847*4882a593Smuzhiyun 	}
4848*4882a593Smuzhiyun 
4849*4882a593Smuzhiyun 	status = be_get_config(adapter);
4850*4882a593Smuzhiyun 	if (status)
4851*4882a593Smuzhiyun 		goto err;
4852*4882a593Smuzhiyun 
4853*4882a593Smuzhiyun 	if (!BE2_chip(adapter) && be_physfn(adapter))
4854*4882a593Smuzhiyun 		be_alloc_sriov_res(adapter);
4855*4882a593Smuzhiyun 
4856*4882a593Smuzhiyun 	status = be_get_resources(adapter);
4857*4882a593Smuzhiyun 	if (status)
4858*4882a593Smuzhiyun 		goto err;
4859*4882a593Smuzhiyun 
4860*4882a593Smuzhiyun 	status = be_msix_enable(adapter);
4861*4882a593Smuzhiyun 	if (status)
4862*4882a593Smuzhiyun 		goto err;
4863*4882a593Smuzhiyun 
4864*4882a593Smuzhiyun 	/* will enable all the needed filter flags in be_open() */
4865*4882a593Smuzhiyun 	status = be_if_create(adapter);
4866*4882a593Smuzhiyun 	if (status)
4867*4882a593Smuzhiyun 		goto err;
4868*4882a593Smuzhiyun 
4869*4882a593Smuzhiyun 	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
4870*4882a593Smuzhiyun 	rtnl_lock();
4871*4882a593Smuzhiyun 	status = be_setup_queues(adapter);
4872*4882a593Smuzhiyun 	rtnl_unlock();
4873*4882a593Smuzhiyun 	if (status)
4874*4882a593Smuzhiyun 		goto err;
4875*4882a593Smuzhiyun 
4876*4882a593Smuzhiyun 	be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
4877*4882a593Smuzhiyun 
4878*4882a593Smuzhiyun 	status = be_mac_setup(adapter);
4879*4882a593Smuzhiyun 	if (status)
4880*4882a593Smuzhiyun 		goto err;
4881*4882a593Smuzhiyun 
4882*4882a593Smuzhiyun 	be_cmd_get_fw_ver(adapter);
4883*4882a593Smuzhiyun 	dev_info(dev, "FW version is %s\n", adapter->fw_ver);
4884*4882a593Smuzhiyun 
4885*4882a593Smuzhiyun 	if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
4886*4882a593Smuzhiyun 		dev_err(dev, "Firmware on card is old(%s), IRQs may not work",
4887*4882a593Smuzhiyun 			adapter->fw_ver);
4888*4882a593Smuzhiyun 		dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4889*4882a593Smuzhiyun 	}
4890*4882a593Smuzhiyun 
4891*4882a593Smuzhiyun 	status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4892*4882a593Smuzhiyun 					 adapter->rx_fc);
4893*4882a593Smuzhiyun 	if (status)
4894*4882a593Smuzhiyun 		be_cmd_get_flow_control(adapter, &adapter->tx_fc,
4895*4882a593Smuzhiyun 					&adapter->rx_fc);
4896*4882a593Smuzhiyun 
4897*4882a593Smuzhiyun 	dev_info(&adapter->pdev->dev, "HW Flow control - TX:%d RX:%d\n",
4898*4882a593Smuzhiyun 		 adapter->tx_fc, adapter->rx_fc);
4899*4882a593Smuzhiyun 
4900*4882a593Smuzhiyun 	if (be_physfn(adapter))
4901*4882a593Smuzhiyun 		be_cmd_set_logical_link_config(adapter,
4902*4882a593Smuzhiyun 					       IFLA_VF_LINK_STATE_AUTO, 0);
4903*4882a593Smuzhiyun 
4904*4882a593Smuzhiyun 	/* BE3 EVB echoes broadcast/multicast packets back to PF's vport
4905*4882a593Smuzhiyun 	 * confusing a Linux bridge or OVS that it might be connected to.
4906*4882a593Smuzhiyun 	 * Set the EVB to PASSTHRU mode which effectively disables the EVB
4907*4882a593Smuzhiyun 	 * when SRIOV is not enabled.
4908*4882a593Smuzhiyun 	 */
4909*4882a593Smuzhiyun 	if (BE3_chip(adapter))
4910*4882a593Smuzhiyun 		be_cmd_set_hsw_config(adapter, 0, 0, adapter->if_handle,
4911*4882a593Smuzhiyun 				      PORT_FWD_TYPE_PASSTHRU, 0);
4912*4882a593Smuzhiyun 
4913*4882a593Smuzhiyun 	if (adapter->num_vfs)
4914*4882a593Smuzhiyun 		be_vf_setup(adapter);
4915*4882a593Smuzhiyun 
4916*4882a593Smuzhiyun 	status = be_cmd_get_phy_info(adapter);
4917*4882a593Smuzhiyun 	if (!status && be_pause_supported(adapter))
4918*4882a593Smuzhiyun 		adapter->phy.fc_autoneg = 1;
4919*4882a593Smuzhiyun 
4920*4882a593Smuzhiyun 	if (be_physfn(adapter) && !lancer_chip(adapter))
4921*4882a593Smuzhiyun 		be_cmd_set_features(adapter);
4922*4882a593Smuzhiyun 
4923*4882a593Smuzhiyun 	be_schedule_worker(adapter);
4924*4882a593Smuzhiyun 	adapter->flags |= BE_FLAGS_SETUP_DONE;
4925*4882a593Smuzhiyun 	return 0;
4926*4882a593Smuzhiyun err:
4927*4882a593Smuzhiyun 	be_clear(adapter);
4928*4882a593Smuzhiyun 	return status;
4929*4882a593Smuzhiyun }
4930*4882a593Smuzhiyun 
4931*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
4932*4882a593Smuzhiyun static void be_netpoll(struct net_device *netdev)
4933*4882a593Smuzhiyun {
4934*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
4935*4882a593Smuzhiyun 	struct be_eq_obj *eqo;
4936*4882a593Smuzhiyun 	int i;
4937*4882a593Smuzhiyun 
4938*4882a593Smuzhiyun 	for_all_evt_queues(adapter, eqo, i) {
4939*4882a593Smuzhiyun 		be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0, 0);
4940*4882a593Smuzhiyun 		napi_schedule(&eqo->napi);
4941*4882a593Smuzhiyun 	}
4942*4882a593Smuzhiyun }
4943*4882a593Smuzhiyun #endif
4944*4882a593Smuzhiyun 
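/* Flash a firmware image; rejected with -ENETDOWN if the interface is not
 * running.
 */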
4945*4882a593Smuzhiyun int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4946*4882a593Smuzhiyun {
4947*4882a593Smuzhiyun 	const struct firmware *fw;
4948*4882a593Smuzhiyun 	int status;
4949*4882a593Smuzhiyun 
4950*4882a593Smuzhiyun 	if (!netif_running(adapter->netdev)) {
4951*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev,
4952*4882a593Smuzhiyun 			"Firmware load not allowed (interface is down)\n");
4953*4882a593Smuzhiyun 		return -ENETDOWN;
4954*4882a593Smuzhiyun 	}
4955*4882a593Smuzhiyun 
4956*4882a593Smuzhiyun 	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4957*4882a593Smuzhiyun 	if (status)
4958*4882a593Smuzhiyun 		goto fw_exit;
4959*4882a593Smuzhiyun 
4960*4882a593Smuzhiyun 	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4961*4882a593Smuzhiyun 
4962*4882a593Smuzhiyun 	if (lancer_chip(adapter))
4963*4882a593Smuzhiyun 		status = lancer_fw_download(adapter, fw);
4964*4882a593Smuzhiyun 	else
4965*4882a593Smuzhiyun 		status = be_fw_download(adapter, fw);
4966*4882a593Smuzhiyun 
4967*4882a593Smuzhiyun 	if (!status)
4968*4882a593Smuzhiyun 		be_cmd_get_fw_ver(adapter);
4969*4882a593Smuzhiyun 
4970*4882a593Smuzhiyun fw_exit:
4971*4882a593Smuzhiyun 	release_firmware(fw);
4972*4882a593Smuzhiyun 	return status;
4973*4882a593Smuzhiyun }
4974*4882a593Smuzhiyun 
4975*4882a593Smuzhiyun static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
4976*4882a593Smuzhiyun 				 u16 flags, struct netlink_ext_ack *extack)
4977*4882a593Smuzhiyun {
4978*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(dev);
4979*4882a593Smuzhiyun 	struct nlattr *attr, *br_spec;
4980*4882a593Smuzhiyun 	int rem;
4981*4882a593Smuzhiyun 	int status = 0;
4982*4882a593Smuzhiyun 	u16 mode = 0;
4983*4882a593Smuzhiyun 
4984*4882a593Smuzhiyun 	if (!sriov_enabled(adapter))
4985*4882a593Smuzhiyun 		return -EOPNOTSUPP;
4986*4882a593Smuzhiyun 
4987*4882a593Smuzhiyun 	br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4988*4882a593Smuzhiyun 	if (!br_spec)
4989*4882a593Smuzhiyun 		return -EINVAL;
4990*4882a593Smuzhiyun 
4991*4882a593Smuzhiyun 	nla_for_each_nested(attr, br_spec, rem) {
4992*4882a593Smuzhiyun 		if (nla_type(attr) != IFLA_BRIDGE_MODE)
4993*4882a593Smuzhiyun 			continue;
4994*4882a593Smuzhiyun 
4995*4882a593Smuzhiyun 		if (nla_len(attr) < sizeof(mode))
4996*4882a593Smuzhiyun 			return -EINVAL;
4997*4882a593Smuzhiyun 
4998*4882a593Smuzhiyun 		mode = nla_get_u16(attr);
4999*4882a593Smuzhiyun 		if (BE3_chip(adapter) && mode == BRIDGE_MODE_VEPA)
5000*4882a593Smuzhiyun 			return -EOPNOTSUPP;
5001*4882a593Smuzhiyun 
5002*4882a593Smuzhiyun 		if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
5003*4882a593Smuzhiyun 			return -EINVAL;
5004*4882a593Smuzhiyun 
5005*4882a593Smuzhiyun 		status = be_cmd_set_hsw_config(adapter, 0, 0,
5006*4882a593Smuzhiyun 					       adapter->if_handle,
5007*4882a593Smuzhiyun 					       mode == BRIDGE_MODE_VEPA ?
5008*4882a593Smuzhiyun 					       PORT_FWD_TYPE_VEPA :
5009*4882a593Smuzhiyun 					       PORT_FWD_TYPE_VEB, 0);
5010*4882a593Smuzhiyun 		if (status)
5011*4882a593Smuzhiyun 			goto err;
5012*4882a593Smuzhiyun 
5013*4882a593Smuzhiyun 		dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
5014*4882a593Smuzhiyun 			 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
5015*4882a593Smuzhiyun 
5016*4882a593Smuzhiyun 		return status;
5017*4882a593Smuzhiyun 	}
5018*4882a593Smuzhiyun err:
5019*4882a593Smuzhiyun 	dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
5020*4882a593Smuzhiyun 		mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
5021*4882a593Smuzhiyun 
5022*4882a593Smuzhiyun 	return status;
5023*4882a593Smuzhiyun }
5024*4882a593Smuzhiyun 
5025*4882a593Smuzhiyun static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
5026*4882a593Smuzhiyun 				 struct net_device *dev, u32 filter_mask,
5027*4882a593Smuzhiyun 				 int nlflags)
5028*4882a593Smuzhiyun {
5029*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(dev);
5030*4882a593Smuzhiyun 	int status = 0;
5031*4882a593Smuzhiyun 	u8 hsw_mode;
5032*4882a593Smuzhiyun 
5033*4882a593Smuzhiyun 	/* BE and Lancer chips support VEB mode only */
5034*4882a593Smuzhiyun 	if (BEx_chip(adapter) || lancer_chip(adapter)) {
5035*4882a593Smuzhiyun 		/* VEB is disabled in non-SR-IOV profiles on BE3/Lancer */
5036*4882a593Smuzhiyun 		if (!pci_sriov_get_totalvfs(adapter->pdev))
5037*4882a593Smuzhiyun 			return 0;
5038*4882a593Smuzhiyun 		hsw_mode = PORT_FWD_TYPE_VEB;
5039*4882a593Smuzhiyun 	} else {
5040*4882a593Smuzhiyun 		status = be_cmd_get_hsw_config(adapter, NULL, 0,
5041*4882a593Smuzhiyun 					       adapter->if_handle, &hsw_mode,
5042*4882a593Smuzhiyun 					       NULL);
5043*4882a593Smuzhiyun 		if (status)
5044*4882a593Smuzhiyun 			return 0;
5045*4882a593Smuzhiyun 
5046*4882a593Smuzhiyun 		if (hsw_mode == PORT_FWD_TYPE_PASSTHRU)
5047*4882a593Smuzhiyun 			return 0;
5048*4882a593Smuzhiyun 	}
5049*4882a593Smuzhiyun 
5050*4882a593Smuzhiyun 	return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
5051*4882a593Smuzhiyun 				       hsw_mode == PORT_FWD_TYPE_VEPA ?
5052*4882a593Smuzhiyun 				       BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
5053*4882a593Smuzhiyun 				       0, 0, nlflags, filter_mask, NULL);
5054*4882a593Smuzhiyun }
5055*4882a593Smuzhiyun 
5056*4882a593Smuzhiyun static struct be_cmd_work *be_alloc_work(struct be_adapter *adapter,
5057*4882a593Smuzhiyun 					 void (*func)(struct work_struct *))
5058*4882a593Smuzhiyun {
5059*4882a593Smuzhiyun 	struct be_cmd_work *work;
5060*4882a593Smuzhiyun 
5061*4882a593Smuzhiyun 	work = kzalloc(sizeof(*work), GFP_ATOMIC);
5062*4882a593Smuzhiyun 	if (!work) {
5063*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev,
5064*4882a593Smuzhiyun 			"be_work memory allocation failed\n");
5065*4882a593Smuzhiyun 		return NULL;
5066*4882a593Smuzhiyun 	}
5067*4882a593Smuzhiyun 
5068*4882a593Smuzhiyun 	INIT_WORK(&work->work, func);
5069*4882a593Smuzhiyun 	work->adapter = adapter;
5070*4882a593Smuzhiyun 	return work;
5071*4882a593Smuzhiyun }
5072*4882a593Smuzhiyun 
5073*4882a593Smuzhiyun static netdev_features_t be_features_check(struct sk_buff *skb,
5074*4882a593Smuzhiyun 					   struct net_device *dev,
5075*4882a593Smuzhiyun 					   netdev_features_t features)
5076*4882a593Smuzhiyun {
5077*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(dev);
5078*4882a593Smuzhiyun 	u8 l4_hdr = 0;
5079*4882a593Smuzhiyun 
5080*4882a593Smuzhiyun 	if (skb_is_gso(skb)) {
5081*4882a593Smuzhiyun 		/* IPv6 TSO requests with extension hdrs are a problem
5082*4882a593Smuzhiyun 		 * to Lancer and BE3 HW. Disable TSO6 feature.
5083*4882a593Smuzhiyun 		 */
5084*4882a593Smuzhiyun 		if (!skyhawk_chip(adapter) && is_ipv6_ext_hdr(skb))
5085*4882a593Smuzhiyun 			features &= ~NETIF_F_TSO6;
5086*4882a593Smuzhiyun 
5087*4882a593Smuzhiyun 		/* Lancer cannot handle the packet with MSS less than 256.
5088*4882a593Smuzhiyun 		 * Also it can't handle a TSO packet with a single segment.
5089*4882a593Smuzhiyun 		 * Disable the GSO support in such cases
5090*4882a593Smuzhiyun 		 */
5091*4882a593Smuzhiyun 		if (lancer_chip(adapter) &&
5092*4882a593Smuzhiyun 		    (skb_shinfo(skb)->gso_size < 256 ||
5093*4882a593Smuzhiyun 		     skb_shinfo(skb)->gso_segs == 1))
5094*4882a593Smuzhiyun 			features &= ~NETIF_F_GSO_MASK;
5095*4882a593Smuzhiyun 	}
5096*4882a593Smuzhiyun 
5097*4882a593Smuzhiyun 	/* The code below restricts offload features for some tunneled and
5098*4882a593Smuzhiyun 	 * Q-in-Q packets.
5099*4882a593Smuzhiyun 	 * Offload features for normal (non tunnel) packets are unchanged.
5100*4882a593Smuzhiyun 	 */
5101*4882a593Smuzhiyun 	features = vlan_features_check(skb, features);
5102*4882a593Smuzhiyun 	if (!skb->encapsulation ||
5103*4882a593Smuzhiyun 	    !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
5104*4882a593Smuzhiyun 		return features;
5105*4882a593Smuzhiyun 
5106*4882a593Smuzhiyun 	/* It's an encapsulated packet and VxLAN offloads are enabled. We
5107*4882a593Smuzhiyun 	 * should disable tunnel offload features if it's not a VxLAN packet,
5108*4882a593Smuzhiyun 	 * as tunnel offloads have been enabled only for VxLAN. This is done to
5109*4882a593Smuzhiyun 	 * allow other tunneled traffic, such as GRE, to work while VxLAN
5110*4882a593Smuzhiyun 	 * offloads are configured in Skyhawk-R.
5111*4882a593Smuzhiyun 	 */
5112*4882a593Smuzhiyun 	switch (vlan_get_protocol(skb)) {
5113*4882a593Smuzhiyun 	case htons(ETH_P_IP):
5114*4882a593Smuzhiyun 		l4_hdr = ip_hdr(skb)->protocol;
5115*4882a593Smuzhiyun 		break;
5116*4882a593Smuzhiyun 	case htons(ETH_P_IPV6):
5117*4882a593Smuzhiyun 		l4_hdr = ipv6_hdr(skb)->nexthdr;
5118*4882a593Smuzhiyun 		break;
5119*4882a593Smuzhiyun 	default:
5120*4882a593Smuzhiyun 		return features;
5121*4882a593Smuzhiyun 	}
5122*4882a593Smuzhiyun 
5123*4882a593Smuzhiyun 	if (l4_hdr != IPPROTO_UDP ||
5124*4882a593Smuzhiyun 	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
5125*4882a593Smuzhiyun 	    skb->inner_protocol != htons(ETH_P_TEB) ||
5126*4882a593Smuzhiyun 	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
5127*4882a593Smuzhiyun 		sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
5128*4882a593Smuzhiyun 	    !adapter->vxlan_port ||
5129*4882a593Smuzhiyun 	    udp_hdr(skb)->dest != adapter->vxlan_port)
5130*4882a593Smuzhiyun 		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
5131*4882a593Smuzhiyun 
5132*4882a593Smuzhiyun 	return features;
5133*4882a593Smuzhiyun }
5134*4882a593Smuzhiyun 
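/* Compose the physical port id from the HBA port number followed by the
 * controller serial number words in reverse order.
 */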
5135*4882a593Smuzhiyun static int be_get_phys_port_id(struct net_device *dev,
5136*4882a593Smuzhiyun 			       struct netdev_phys_item_id *ppid)
5137*4882a593Smuzhiyun {
5138*4882a593Smuzhiyun 	int i, id_len = CNTL_SERIAL_NUM_WORDS * CNTL_SERIAL_NUM_WORD_SZ + 1;
5139*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(dev);
5140*4882a593Smuzhiyun 	u8 *id;
5141*4882a593Smuzhiyun 
5142*4882a593Smuzhiyun 	if (MAX_PHYS_ITEM_ID_LEN < id_len)
5143*4882a593Smuzhiyun 		return -ENOSPC;
5144*4882a593Smuzhiyun 
5145*4882a593Smuzhiyun 	ppid->id[0] = adapter->hba_port_num + 1;
5146*4882a593Smuzhiyun 	id = &ppid->id[1];
5147*4882a593Smuzhiyun 	for (i = CNTL_SERIAL_NUM_WORDS - 1; i >= 0;
5148*4882a593Smuzhiyun 	     i--, id += CNTL_SERIAL_NUM_WORD_SZ)
5149*4882a593Smuzhiyun 		memcpy(id, &adapter->serial_num[i], CNTL_SERIAL_NUM_WORD_SZ);
5150*4882a593Smuzhiyun 
5151*4882a593Smuzhiyun 	ppid->id_len = id_len;
5152*4882a593Smuzhiyun 
5153*4882a593Smuzhiyun 	return 0;
5154*4882a593Smuzhiyun }
5155*4882a593Smuzhiyun 
5156*4882a593Smuzhiyun static void be_set_rx_mode(struct net_device *dev)
5157*4882a593Smuzhiyun {
5158*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(dev);
5159*4882a593Smuzhiyun 	struct be_cmd_work *work;
5160*4882a593Smuzhiyun 
5161*4882a593Smuzhiyun 	work = be_alloc_work(adapter, be_work_set_rx_mode);
5162*4882a593Smuzhiyun 	if (work)
5163*4882a593Smuzhiyun 		queue_work(be_wq, &work->work);
5164*4882a593Smuzhiyun }
5165*4882a593Smuzhiyun 
5166*4882a593Smuzhiyun static const struct net_device_ops be_netdev_ops = {
5167*4882a593Smuzhiyun 	.ndo_open		= be_open,
5168*4882a593Smuzhiyun 	.ndo_stop		= be_close,
5169*4882a593Smuzhiyun 	.ndo_start_xmit		= be_xmit,
5170*4882a593Smuzhiyun 	.ndo_set_rx_mode	= be_set_rx_mode,
5171*4882a593Smuzhiyun 	.ndo_set_mac_address	= be_mac_addr_set,
5172*4882a593Smuzhiyun 	.ndo_get_stats64	= be_get_stats64,
5173*4882a593Smuzhiyun 	.ndo_validate_addr	= eth_validate_addr,
5174*4882a593Smuzhiyun 	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
5175*4882a593Smuzhiyun 	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
5176*4882a593Smuzhiyun 	.ndo_set_vf_mac		= be_set_vf_mac,
5177*4882a593Smuzhiyun 	.ndo_set_vf_vlan	= be_set_vf_vlan,
5178*4882a593Smuzhiyun 	.ndo_set_vf_rate	= be_set_vf_tx_rate,
5179*4882a593Smuzhiyun 	.ndo_get_vf_config	= be_get_vf_config,
5180*4882a593Smuzhiyun 	.ndo_set_vf_link_state  = be_set_vf_link_state,
5181*4882a593Smuzhiyun 	.ndo_set_vf_spoofchk    = be_set_vf_spoofchk,
5182*4882a593Smuzhiyun 	.ndo_tx_timeout		= be_tx_timeout,
5183*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
5184*4882a593Smuzhiyun 	.ndo_poll_controller	= be_netpoll,
5185*4882a593Smuzhiyun #endif
5186*4882a593Smuzhiyun 	.ndo_bridge_setlink	= be_ndo_bridge_setlink,
5187*4882a593Smuzhiyun 	.ndo_bridge_getlink	= be_ndo_bridge_getlink,
5188*4882a593Smuzhiyun 	.ndo_udp_tunnel_add	= udp_tunnel_nic_add_port,
5189*4882a593Smuzhiyun 	.ndo_udp_tunnel_del	= udp_tunnel_nic_del_port,
5190*4882a593Smuzhiyun 	.ndo_features_check	= be_features_check,
5191*4882a593Smuzhiyun 	.ndo_get_phys_port_id   = be_get_phys_port_id,
5192*4882a593Smuzhiyun };
5193*4882a593Smuzhiyun 
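/* Set up netdev feature flags, ops and MTU limits based on the interface
 * capabilities reported for this adapter.
 */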
5194*4882a593Smuzhiyun static void be_netdev_init(struct net_device *netdev)
5195*4882a593Smuzhiyun {
5196*4882a593Smuzhiyun 	struct be_adapter *adapter = netdev_priv(netdev);
5197*4882a593Smuzhiyun 
5198*4882a593Smuzhiyun 	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
5199*4882a593Smuzhiyun 		NETIF_F_GSO_UDP_TUNNEL |
5200*4882a593Smuzhiyun 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
5201*4882a593Smuzhiyun 		NETIF_F_HW_VLAN_CTAG_TX;
5202*4882a593Smuzhiyun 	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
5203*4882a593Smuzhiyun 		netdev->hw_features |= NETIF_F_RXHASH;
5204*4882a593Smuzhiyun 
5205*4882a593Smuzhiyun 	netdev->features |= netdev->hw_features |
5206*4882a593Smuzhiyun 		NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
5207*4882a593Smuzhiyun 
5208*4882a593Smuzhiyun 	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
5209*4882a593Smuzhiyun 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
5210*4882a593Smuzhiyun 
5211*4882a593Smuzhiyun 	netdev->priv_flags |= IFF_UNICAST_FLT;
5212*4882a593Smuzhiyun 
5213*4882a593Smuzhiyun 	netdev->flags |= IFF_MULTICAST;
5214*4882a593Smuzhiyun 
5215*4882a593Smuzhiyun 	netif_set_gso_max_size(netdev, BE_MAX_GSO_SIZE - ETH_HLEN);
5216*4882a593Smuzhiyun 
5217*4882a593Smuzhiyun 	netdev->netdev_ops = &be_netdev_ops;
5218*4882a593Smuzhiyun 
5219*4882a593Smuzhiyun 	netdev->ethtool_ops = &be_ethtool_ops;
5220*4882a593Smuzhiyun 
5221*4882a593Smuzhiyun 	if (!lancer_chip(adapter) && !BEx_chip(adapter) && !be_is_mc(adapter))
5222*4882a593Smuzhiyun 		netdev->udp_tunnel_nic_info = &be_udp_tunnels;
5223*4882a593Smuzhiyun 
5224*4882a593Smuzhiyun 	/* MTU range: 256 - 9000 */
5225*4882a593Smuzhiyun 	netdev->min_mtu = BE_MIN_MTU;
5226*4882a593Smuzhiyun 	netdev->max_mtu = BE_MAX_MTU;
5227*4882a593Smuzhiyun }
5228*4882a593Smuzhiyun 
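/* Quiesce the netdev and release all HW/SW resources; paired with
 * be_resume() below.
 */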
5229*4882a593Smuzhiyun static void be_cleanup(struct be_adapter *adapter)
5230*4882a593Smuzhiyun {
5231*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
5232*4882a593Smuzhiyun 
5233*4882a593Smuzhiyun 	rtnl_lock();
5234*4882a593Smuzhiyun 	netif_device_detach(netdev);
5235*4882a593Smuzhiyun 	if (netif_running(netdev))
5236*4882a593Smuzhiyun 		be_close(netdev);
5237*4882a593Smuzhiyun 	rtnl_unlock();
5238*4882a593Smuzhiyun 
5239*4882a593Smuzhiyun 	be_clear(adapter);
5240*4882a593Smuzhiyun }
5241*4882a593Smuzhiyun 
5242*4882a593Smuzhiyun static int be_resume(struct be_adapter *adapter)
5243*4882a593Smuzhiyun {
5244*4882a593Smuzhiyun 	struct net_device *netdev = adapter->netdev;
5245*4882a593Smuzhiyun 	int status;
5246*4882a593Smuzhiyun 
5247*4882a593Smuzhiyun 	status = be_setup(adapter);
5248*4882a593Smuzhiyun 	if (status)
5249*4882a593Smuzhiyun 		return status;
5250*4882a593Smuzhiyun 
5251*4882a593Smuzhiyun 	rtnl_lock();
5252*4882a593Smuzhiyun 	if (netif_running(netdev))
5253*4882a593Smuzhiyun 		status = be_open(netdev);
5254*4882a593Smuzhiyun 	rtnl_unlock();
5255*4882a593Smuzhiyun 
5256*4882a593Smuzhiyun 	if (status)
5257*4882a593Smuzhiyun 		return status;
5258*4882a593Smuzhiyun 
5259*4882a593Smuzhiyun 	netif_device_attach(netdev);
5260*4882a593Smuzhiyun 
5261*4882a593Smuzhiyun 	return 0;
5262*4882a593Smuzhiyun }
5263*4882a593Smuzhiyun 
5264*4882a593Smuzhiyun static void be_soft_reset(struct be_adapter *adapter)
5265*4882a593Smuzhiyun {
5266*4882a593Smuzhiyun 	u32 val;
5267*4882a593Smuzhiyun 
5268*4882a593Smuzhiyun 	dev_info(&adapter->pdev->dev, "Initiating chip soft reset\n");
5269*4882a593Smuzhiyun 	val = ioread32(adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5270*4882a593Smuzhiyun 	val |= SLIPORT_SOFTRESET_SR_MASK;
5271*4882a593Smuzhiyun 	iowrite32(val, adapter->pcicfg + SLIPORT_SOFTRESET_OFFSET);
5272*4882a593Smuzhiyun }
5273*4882a593Smuzhiyun 
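/* Decide whether a recoverable TPE error can actually be recovered now:
 * check the POST stage error code and rate-limit recovery attempts
 * (not too soon after probe, not too soon after the last recovery, and
 * not for a repeated error code).
 */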
5274*4882a593Smuzhiyun static bool be_err_is_recoverable(struct be_adapter *adapter)
5275*4882a593Smuzhiyun {
5276*4882a593Smuzhiyun 	struct be_error_recovery *err_rec = &adapter->error_recovery;
5277*4882a593Smuzhiyun 	unsigned long initial_idle_time =
5278*4882a593Smuzhiyun 		msecs_to_jiffies(ERR_RECOVERY_IDLE_TIME);
5279*4882a593Smuzhiyun 	unsigned long recovery_interval =
5280*4882a593Smuzhiyun 		msecs_to_jiffies(ERR_RECOVERY_INTERVAL);
5281*4882a593Smuzhiyun 	u16 ue_err_code;
5282*4882a593Smuzhiyun 	u32 val;
5283*4882a593Smuzhiyun 
5284*4882a593Smuzhiyun 	val = be_POST_stage_get(adapter);
5285*4882a593Smuzhiyun 	if ((val & POST_STAGE_RECOVERABLE_ERR) != POST_STAGE_RECOVERABLE_ERR)
5286*4882a593Smuzhiyun 		return false;
5287*4882a593Smuzhiyun 	ue_err_code = val & POST_ERR_RECOVERY_CODE_MASK;
5288*4882a593Smuzhiyun 	if (ue_err_code == 0)
5289*4882a593Smuzhiyun 		return false;
5290*4882a593Smuzhiyun 
5291*4882a593Smuzhiyun 	dev_err(&adapter->pdev->dev, "Recoverable HW error code: 0x%x\n",
5292*4882a593Smuzhiyun 		ue_err_code);
5293*4882a593Smuzhiyun 
5294*4882a593Smuzhiyun 	if (time_before_eq(jiffies - err_rec->probe_time, initial_idle_time)) {
5295*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev,
5296*4882a593Smuzhiyun 			"Cannot recover within %lu sec from driver load\n",
5297*4882a593Smuzhiyun 			jiffies_to_msecs(initial_idle_time) / MSEC_PER_SEC);
5298*4882a593Smuzhiyun 		return false;
5299*4882a593Smuzhiyun 	}
5300*4882a593Smuzhiyun 
5301*4882a593Smuzhiyun 	if (err_rec->last_recovery_time && time_before_eq(
5302*4882a593Smuzhiyun 		jiffies - err_rec->last_recovery_time, recovery_interval)) {
5303*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev,
5304*4882a593Smuzhiyun 			"Cannot recover within %lu sec from last recovery\n",
5305*4882a593Smuzhiyun 			jiffies_to_msecs(recovery_interval) / MSEC_PER_SEC);
5306*4882a593Smuzhiyun 		return false;
5307*4882a593Smuzhiyun 	}
5308*4882a593Smuzhiyun 
5309*4882a593Smuzhiyun 	if (ue_err_code == err_rec->last_err_code) {
5310*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev,
5311*4882a593Smuzhiyun 			"Cannot recover from a consecutive TPE error\n");
5312*4882a593Smuzhiyun 		return false;
5313*4882a593Smuzhiyun 	}
5314*4882a593Smuzhiyun 
5315*4882a593Smuzhiyun 	err_rec->last_recovery_time = jiffies;
5316*4882a593Smuzhiyun 	err_rec->last_err_code = ue_err_code;
5317*4882a593Smuzhiyun 	return true;
5318*4882a593Smuzhiyun }
5319*4882a593Smuzhiyun 
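/* One step of the BEx/SH TPE recovery state machine, driven from the
 * error-detection work: NONE -> DETECT -> (PF0 only) RESET -> PRE_POLL ->
 * done. Returns -EAGAIN while further steps remain to be scheduled.
 */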
5320*4882a593Smuzhiyun static int be_tpe_recover(struct be_adapter *adapter)
5321*4882a593Smuzhiyun {
5322*4882a593Smuzhiyun 	struct be_error_recovery *err_rec = &adapter->error_recovery;
5323*4882a593Smuzhiyun 	int status = -EAGAIN;
5324*4882a593Smuzhiyun 	u32 val;
5325*4882a593Smuzhiyun 
5326*4882a593Smuzhiyun 	switch (err_rec->recovery_state) {
5327*4882a593Smuzhiyun 	case ERR_RECOVERY_ST_NONE:
5328*4882a593Smuzhiyun 		err_rec->recovery_state = ERR_RECOVERY_ST_DETECT;
5329*4882a593Smuzhiyun 		err_rec->resched_delay = ERR_RECOVERY_UE_DETECT_DURATION;
5330*4882a593Smuzhiyun 		break;
5331*4882a593Smuzhiyun 
5332*4882a593Smuzhiyun 	case ERR_RECOVERY_ST_DETECT:
5333*4882a593Smuzhiyun 		val = be_POST_stage_get(adapter);
5334*4882a593Smuzhiyun 		if ((val & POST_STAGE_RECOVERABLE_ERR) !=
5335*4882a593Smuzhiyun 		    POST_STAGE_RECOVERABLE_ERR) {
5336*4882a593Smuzhiyun 			dev_err(&adapter->pdev->dev,
5337*4882a593Smuzhiyun 				"Unrecoverable HW error detected: 0x%x\n", val);
5338*4882a593Smuzhiyun 			status = -EINVAL;
5339*4882a593Smuzhiyun 			err_rec->resched_delay = 0;
5340*4882a593Smuzhiyun 			break;
5341*4882a593Smuzhiyun 		}
5342*4882a593Smuzhiyun 
5343*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev, "Recoverable HW error detected\n");
5344*4882a593Smuzhiyun 
5345*4882a593Smuzhiyun 		/* Only PF0 initiates Chip Soft Reset. But PF0 must wait UE2SR
5346*4882a593Smuzhiyun 		 * milliseconds before it checks for final error status in
5347*4882a593Smuzhiyun 		 * SLIPORT_SEMAPHORE to determine if recovery criteria is met.
5348*4882a593Smuzhiyun 		 * If it does, then PF0 initiates a Soft Reset.
5349*4882a593Smuzhiyun 		 */
5350*4882a593Smuzhiyun 		if (adapter->pf_num == 0) {
5351*4882a593Smuzhiyun 			err_rec->recovery_state = ERR_RECOVERY_ST_RESET;
5352*4882a593Smuzhiyun 			err_rec->resched_delay = err_rec->ue_to_reset_time -
5353*4882a593Smuzhiyun 					ERR_RECOVERY_UE_DETECT_DURATION;
5354*4882a593Smuzhiyun 			break;
5355*4882a593Smuzhiyun 		}
5356*4882a593Smuzhiyun 
5357*4882a593Smuzhiyun 		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
5358*4882a593Smuzhiyun 		err_rec->resched_delay = err_rec->ue_to_poll_time -
5359*4882a593Smuzhiyun 					ERR_RECOVERY_UE_DETECT_DURATION;
5360*4882a593Smuzhiyun 		break;
5361*4882a593Smuzhiyun 
5362*4882a593Smuzhiyun 	case ERR_RECOVERY_ST_RESET:
5363*4882a593Smuzhiyun 		if (!be_err_is_recoverable(adapter)) {
5364*4882a593Smuzhiyun 			dev_err(&adapter->pdev->dev,
5365*4882a593Smuzhiyun 				"Failed to meet recovery criteria\n");
5366*4882a593Smuzhiyun 			status = -EIO;
5367*4882a593Smuzhiyun 			err_rec->resched_delay = 0;
5368*4882a593Smuzhiyun 			break;
5369*4882a593Smuzhiyun 		}
5370*4882a593Smuzhiyun 		be_soft_reset(adapter);
5371*4882a593Smuzhiyun 		err_rec->recovery_state = ERR_RECOVERY_ST_PRE_POLL;
5372*4882a593Smuzhiyun 		err_rec->resched_delay = err_rec->ue_to_poll_time -
5373*4882a593Smuzhiyun 					err_rec->ue_to_reset_time;
5374*4882a593Smuzhiyun 		break;
5375*4882a593Smuzhiyun 
5376*4882a593Smuzhiyun 	case ERR_RECOVERY_ST_PRE_POLL:
5377*4882a593Smuzhiyun 		err_rec->recovery_state = ERR_RECOVERY_ST_REINIT;
5378*4882a593Smuzhiyun 		err_rec->resched_delay = 0;
5379*4882a593Smuzhiyun 		status = 0;			/* done */
5380*4882a593Smuzhiyun 		break;
5381*4882a593Smuzhiyun 
5382*4882a593Smuzhiyun 	default:
5383*4882a593Smuzhiyun 		status = -EINVAL;
5384*4882a593Smuzhiyun 		err_rec->resched_delay = 0;
5385*4882a593Smuzhiyun 		break;
5386*4882a593Smuzhiyun 	}
5387*4882a593Smuzhiyun 
5388*4882a593Smuzhiyun 	return status;
5389*4882a593Smuzhiyun }
5390*4882a593Smuzhiyun 
5391*4882a593Smuzhiyun static int be_err_recover(struct be_adapter *adapter)
5392*4882a593Smuzhiyun {
5393*4882a593Smuzhiyun 	int status;
5394*4882a593Smuzhiyun 
5395*4882a593Smuzhiyun 	if (!lancer_chip(adapter)) {
5396*4882a593Smuzhiyun 		if (!adapter->error_recovery.recovery_supported ||
5397*4882a593Smuzhiyun 		    adapter->priv_flags & BE_DISABLE_TPE_RECOVERY)
5398*4882a593Smuzhiyun 			return -EIO;
5399*4882a593Smuzhiyun 		status = be_tpe_recover(adapter);
5400*4882a593Smuzhiyun 		if (status)
5401*4882a593Smuzhiyun 			goto err;
5402*4882a593Smuzhiyun 	}
5403*4882a593Smuzhiyun 
5404*4882a593Smuzhiyun 	/* Wait for adapter to reach quiescent state before
5405*4882a593Smuzhiyun 	 * destroying queues
5406*4882a593Smuzhiyun 	 */
5407*4882a593Smuzhiyun 	status = be_fw_wait_ready(adapter);
5408*4882a593Smuzhiyun 	if (status)
5409*4882a593Smuzhiyun 		goto err;
5410*4882a593Smuzhiyun 
5411*4882a593Smuzhiyun 	adapter->flags |= BE_FLAGS_TRY_RECOVERY;
5412*4882a593Smuzhiyun 
5413*4882a593Smuzhiyun 	be_cleanup(adapter);
5414*4882a593Smuzhiyun 
5415*4882a593Smuzhiyun 	status = be_resume(adapter);
5416*4882a593Smuzhiyun 	if (status)
5417*4882a593Smuzhiyun 		goto err;
5418*4882a593Smuzhiyun 
5419*4882a593Smuzhiyun 	adapter->flags &= ~BE_FLAGS_TRY_RECOVERY;
5420*4882a593Smuzhiyun 
5421*4882a593Smuzhiyun err:
5422*4882a593Smuzhiyun 	return status;
5423*4882a593Smuzhiyun }
5424*4882a593Smuzhiyun 
5425*4882a593Smuzhiyun static void be_err_detection_task(struct work_struct *work)
5426*4882a593Smuzhiyun {
5427*4882a593Smuzhiyun 	struct be_error_recovery *err_rec =
5428*4882a593Smuzhiyun 			container_of(work, struct be_error_recovery,
5429*4882a593Smuzhiyun 				     err_detection_work.work);
5430*4882a593Smuzhiyun 	struct be_adapter *adapter =
5431*4882a593Smuzhiyun 			container_of(err_rec, struct be_adapter,
5432*4882a593Smuzhiyun 				     error_recovery);
5433*4882a593Smuzhiyun 	u32 resched_delay = ERR_RECOVERY_DETECTION_DELAY;
5434*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
5435*4882a593Smuzhiyun 	int recovery_status;
5436*4882a593Smuzhiyun 
5437*4882a593Smuzhiyun 	be_detect_error(adapter);
5438*4882a593Smuzhiyun 	if (!be_check_error(adapter, BE_ERROR_HW))
5439*4882a593Smuzhiyun 		goto reschedule_task;
5440*4882a593Smuzhiyun 
5441*4882a593Smuzhiyun 	recovery_status = be_err_recover(adapter);
5442*4882a593Smuzhiyun 	if (!recovery_status) {
5443*4882a593Smuzhiyun 		err_rec->recovery_retries = 0;
5444*4882a593Smuzhiyun 		err_rec->recovery_state = ERR_RECOVERY_ST_NONE;
5445*4882a593Smuzhiyun 		dev_info(dev, "Adapter recovery successful\n");
5446*4882a593Smuzhiyun 		goto reschedule_task;
5447*4882a593Smuzhiyun 	} else if (!lancer_chip(adapter) && err_rec->resched_delay) {
5448*4882a593Smuzhiyun 		/* BEx/SH recovery state machine */
5449*4882a593Smuzhiyun 		if (adapter->pf_num == 0 &&
5450*4882a593Smuzhiyun 		    err_rec->recovery_state > ERR_RECOVERY_ST_DETECT)
5451*4882a593Smuzhiyun 			dev_err(&adapter->pdev->dev,
5452*4882a593Smuzhiyun 				"Adapter recovery in progress\n");
5453*4882a593Smuzhiyun 		resched_delay = err_rec->resched_delay;
5454*4882a593Smuzhiyun 		goto reschedule_task;
5455*4882a593Smuzhiyun 	} else if (lancer_chip(adapter) && be_virtfn(adapter)) {
5456*4882a593Smuzhiyun 		/* For VFs, check if the PF has allocated resources
5457*4882a593Smuzhiyun 		 * every second.
5458*4882a593Smuzhiyun 		 */
5459*4882a593Smuzhiyun 		dev_err(dev, "Re-trying adapter recovery\n");
5460*4882a593Smuzhiyun 		goto reschedule_task;
5461*4882a593Smuzhiyun 	} else if (lancer_chip(adapter) && err_rec->recovery_retries++ <
5462*4882a593Smuzhiyun 		   ERR_RECOVERY_MAX_RETRY_COUNT) {
5463*4882a593Smuzhiyun 		/* In case of another error during recovery, it takes 30 sec
5464*4882a593Smuzhiyun 		 * for adapter to come out of error. Retry error recovery after
5465*4882a593Smuzhiyun 		 * this time interval.
5466*4882a593Smuzhiyun 		 */
5467*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev, "Re-trying adapter recovery\n");
5468*4882a593Smuzhiyun 		resched_delay = ERR_RECOVERY_RETRY_DELAY;
5469*4882a593Smuzhiyun 		goto reschedule_task;
5470*4882a593Smuzhiyun 	} else {
5471*4882a593Smuzhiyun 		dev_err(dev, "Adapter recovery failed\n");
5472*4882a593Smuzhiyun 		dev_err(dev, "Please reboot server to recover\n");
5473*4882a593Smuzhiyun 	}
5474*4882a593Smuzhiyun 
5475*4882a593Smuzhiyun 	return;
5476*4882a593Smuzhiyun 
5477*4882a593Smuzhiyun reschedule_task:
5478*4882a593Smuzhiyun 	be_schedule_err_detection(adapter, resched_delay);
5479*4882a593Smuzhiyun }
5480*4882a593Smuzhiyun 
5481*4882a593Smuzhiyun static void be_log_sfp_info(struct be_adapter *adapter)
5482*4882a593Smuzhiyun {
5483*4882a593Smuzhiyun 	int status;
5484*4882a593Smuzhiyun 
5485*4882a593Smuzhiyun 	status = be_cmd_query_sfp_info(adapter);
5486*4882a593Smuzhiyun 	if (!status) {
5487*4882a593Smuzhiyun 		dev_err(&adapter->pdev->dev,
5488*4882a593Smuzhiyun 			"Port %c: %s Vendor: %s part no: %s",
5489*4882a593Smuzhiyun 			adapter->port_name,
5490*4882a593Smuzhiyun 			be_misconfig_evt_port_state[adapter->phy_state],
5491*4882a593Smuzhiyun 			adapter->phy.vendor_name,
5492*4882a593Smuzhiyun 			adapter->phy.vendor_pn);
5493*4882a593Smuzhiyun 	}
5494*4882a593Smuzhiyun 	adapter->flags &= ~BE_FLAGS_PHY_MISCONFIGURED;
5495*4882a593Smuzhiyun }
5496*4882a593Smuzhiyun 
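/* Periodic (1 sec) housekeeping: reap MCC completions while the interface
 * is down, request die temperature and stats, replenish starved RX queues,
 * update EQ delays and log SFP info when a misconfigured PHY was reported.
 */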
5497*4882a593Smuzhiyun static void be_worker(struct work_struct *work)
5498*4882a593Smuzhiyun {
5499*4882a593Smuzhiyun 	struct be_adapter *adapter =
5500*4882a593Smuzhiyun 		container_of(work, struct be_adapter, work.work);
5501*4882a593Smuzhiyun 	struct be_rx_obj *rxo;
5502*4882a593Smuzhiyun 	int i;
5503*4882a593Smuzhiyun 
5504*4882a593Smuzhiyun 	if (be_physfn(adapter) &&
5505*4882a593Smuzhiyun 	    MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
5506*4882a593Smuzhiyun 		be_cmd_get_die_temperature(adapter);
5507*4882a593Smuzhiyun 
5508*4882a593Smuzhiyun 	/* when interrupts are not yet enabled, just reap any pending
5509*4882a593Smuzhiyun 	 * mcc completions
5510*4882a593Smuzhiyun 	 */
5511*4882a593Smuzhiyun 	if (!netif_running(adapter->netdev)) {
5512*4882a593Smuzhiyun 		local_bh_disable();
5513*4882a593Smuzhiyun 		be_process_mcc(adapter);
5514*4882a593Smuzhiyun 		local_bh_enable();
5515*4882a593Smuzhiyun 		goto reschedule;
5516*4882a593Smuzhiyun 	}
5517*4882a593Smuzhiyun 
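	/* Issue a fresh async stats request only when no earlier request is
	 * still outstanding.
	 */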
5518*4882a593Smuzhiyun 	if (!adapter->stats_cmd_sent) {
5519*4882a593Smuzhiyun 		if (lancer_chip(adapter))
5520*4882a593Smuzhiyun 			lancer_cmd_get_pport_stats(adapter,
5521*4882a593Smuzhiyun 						   &adapter->stats_cmd);
5522*4882a593Smuzhiyun 		else
5523*4882a593Smuzhiyun 			be_cmd_get_stats(adapter, &adapter->stats_cmd);
5524*4882a593Smuzhiyun 	}
5525*4882a593Smuzhiyun 
5526*4882a593Smuzhiyun 	for_all_rx_queues(adapter, rxo, i) {
5527*4882a593Smuzhiyun 		/* Replenish RX-queues starved due to memory
5528*4882a593Smuzhiyun 		 * allocation failures.
5529*4882a593Smuzhiyun 		 */
5530*4882a593Smuzhiyun 		if (rxo->rx_post_starved)
5531*4882a593Smuzhiyun 			be_post_rx_frags(rxo, GFP_KERNEL, MAX_RX_POST);
5532*4882a593Smuzhiyun 	}
5533*4882a593Smuzhiyun 
5534*4882a593Smuzhiyun 	/* EQ-delay update for Skyhawk is done while notifying EQ */
5535*4882a593Smuzhiyun 	if (!skyhawk_chip(adapter))
5536*4882a593Smuzhiyun 		be_eqd_update(adapter, false);
5537*4882a593Smuzhiyun 
5538*4882a593Smuzhiyun 	if (adapter->flags & BE_FLAGS_PHY_MISCONFIGURED)
5539*4882a593Smuzhiyun 		be_log_sfp_info(adapter);
5540*4882a593Smuzhiyun 
5541*4882a593Smuzhiyun reschedule:
5542*4882a593Smuzhiyun 	adapter->work_counter++;
5543*4882a593Smuzhiyun 	queue_delayed_work(be_wq, &adapter->work, msecs_to_jiffies(1000));
5544*4882a593Smuzhiyun }
5545*4882a593Smuzhiyun 
5546*4882a593Smuzhiyun static void be_unmap_pci_bars(struct be_adapter *adapter)
5547*4882a593Smuzhiyun {
5548*4882a593Smuzhiyun 	if (adapter->csr)
5549*4882a593Smuzhiyun 		pci_iounmap(adapter->pdev, adapter->csr);
5550*4882a593Smuzhiyun 	if (adapter->db)
5551*4882a593Smuzhiyun 		pci_iounmap(adapter->pdev, adapter->db);
5552*4882a593Smuzhiyun 	if (adapter->pcicfg && adapter->pcicfg_mapped)
5553*4882a593Smuzhiyun 		pci_iounmap(adapter->pdev, adapter->pcicfg);
5554*4882a593Smuzhiyun }
5555*4882a593Smuzhiyun 
5556*4882a593Smuzhiyun static int db_bar(struct be_adapter *adapter)
5557*4882a593Smuzhiyun {
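	/* Doorbell registers sit in BAR 0 on Lancer and on virtual functions,
	 * and in BAR 4 on BEx/Skyhawk physical functions.
	 */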
5558*4882a593Smuzhiyun 	if (lancer_chip(adapter) || be_virtfn(adapter))
5559*4882a593Smuzhiyun 		return 0;
5560*4882a593Smuzhiyun 	else
5561*4882a593Smuzhiyun 		return 4;
5562*4882a593Smuzhiyun }
5563*4882a593Smuzhiyun 
5564*4882a593Smuzhiyun static int be_roce_map_pci_bars(struct be_adapter *adapter)
5565*4882a593Smuzhiyun {
5566*4882a593Smuzhiyun 	if (skyhawk_chip(adapter)) {
5567*4882a593Smuzhiyun 		adapter->roce_db.size = 4096;
5568*4882a593Smuzhiyun 		adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
5569*4882a593Smuzhiyun 							      db_bar(adapter));
5570*4882a593Smuzhiyun 		adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
5571*4882a593Smuzhiyun 							       db_bar(adapter));
5572*4882a593Smuzhiyun 	}
5573*4882a593Smuzhiyun 	return 0;
5574*4882a593Smuzhiyun }
5575*4882a593Smuzhiyun 
5576*4882a593Smuzhiyun static int be_map_pci_bars(struct be_adapter *adapter)
5577*4882a593Smuzhiyun {
5578*4882a593Smuzhiyun 	struct pci_dev *pdev = adapter->pdev;
5579*4882a593Smuzhiyun 	u8 __iomem *addr;
5580*4882a593Smuzhiyun 	u32 sli_intf;
5581*4882a593Smuzhiyun 
5582*4882a593Smuzhiyun 	pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
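	/* SLI_INTF in PCI config space reports the SLI family and whether
	 * this function is a VF; both determine the BAR layout used below.
	 */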
5583*4882a593Smuzhiyun 	adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
5584*4882a593Smuzhiyun 				SLI_INTF_FAMILY_SHIFT;
5585*4882a593Smuzhiyun 	adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
5586*4882a593Smuzhiyun 
5587*4882a593Smuzhiyun 	if (BEx_chip(adapter) && be_physfn(adapter)) {
5588*4882a593Smuzhiyun 		adapter->csr = pci_iomap(pdev, 2, 0);
5589*4882a593Smuzhiyun 		if (!adapter->csr)
5590*4882a593Smuzhiyun 			return -ENOMEM;
5591*4882a593Smuzhiyun 	}
5592*4882a593Smuzhiyun 
5593*4882a593Smuzhiyun 	addr = pci_iomap(pdev, db_bar(adapter), 0);
5594*4882a593Smuzhiyun 	if (!addr)
5595*4882a593Smuzhiyun 		goto pci_map_err;
5596*4882a593Smuzhiyun 	adapter->db = addr;
5597*4882a593Smuzhiyun 
5598*4882a593Smuzhiyun 	if (skyhawk_chip(adapter) || BEx_chip(adapter)) {
5599*4882a593Smuzhiyun 		if (be_physfn(adapter)) {
5600*4882a593Smuzhiyun 			/* PCICFG is the 2nd BAR in BE2 */
5601*4882a593Smuzhiyun 			addr = pci_iomap(pdev, BE2_chip(adapter) ? 1 : 0, 0);
5602*4882a593Smuzhiyun 			if (!addr)
5603*4882a593Smuzhiyun 				goto pci_map_err;
5604*4882a593Smuzhiyun 			adapter->pcicfg = addr;
5605*4882a593Smuzhiyun 			adapter->pcicfg_mapped = true;
5606*4882a593Smuzhiyun 		} else {
5607*4882a593Smuzhiyun 			adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
5608*4882a593Smuzhiyun 			adapter->pcicfg_mapped = false;
5609*4882a593Smuzhiyun 		}
5610*4882a593Smuzhiyun 	}
5611*4882a593Smuzhiyun 
5612*4882a593Smuzhiyun 	be_roce_map_pci_bars(adapter);
5613*4882a593Smuzhiyun 	return 0;
5614*4882a593Smuzhiyun 
5615*4882a593Smuzhiyun pci_map_err:
5616*4882a593Smuzhiyun 	dev_err(&pdev->dev, "Error in mapping PCI BARs\n");
5617*4882a593Smuzhiyun 	be_unmap_pci_bars(adapter);
5618*4882a593Smuzhiyun 	return -ENOMEM;
5619*4882a593Smuzhiyun }
5620*4882a593Smuzhiyun 
5621*4882a593Smuzhiyun static void be_drv_cleanup(struct be_adapter *adapter)
5622*4882a593Smuzhiyun {
5623*4882a593Smuzhiyun 	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
5624*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
5625*4882a593Smuzhiyun 
5626*4882a593Smuzhiyun 	if (mem->va)
5627*4882a593Smuzhiyun 		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5628*4882a593Smuzhiyun 
5629*4882a593Smuzhiyun 	mem = &adapter->rx_filter;
5630*4882a593Smuzhiyun 	if (mem->va)
5631*4882a593Smuzhiyun 		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5632*4882a593Smuzhiyun 
5633*4882a593Smuzhiyun 	mem = &adapter->stats_cmd;
5634*4882a593Smuzhiyun 	if (mem->va)
5635*4882a593Smuzhiyun 		dma_free_coherent(dev, mem->size, mem->va, mem->dma);
5636*4882a593Smuzhiyun }
5637*4882a593Smuzhiyun 
5638*4882a593Smuzhiyun /* Allocate and initialize various fields in be_adapter struct */
5639*4882a593Smuzhiyun static int be_drv_init(struct be_adapter *adapter)
5640*4882a593Smuzhiyun {
5641*4882a593Smuzhiyun 	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
5642*4882a593Smuzhiyun 	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
5643*4882a593Smuzhiyun 	struct be_dma_mem *rx_filter = &adapter->rx_filter;
5644*4882a593Smuzhiyun 	struct be_dma_mem *stats_cmd = &adapter->stats_cmd;
5645*4882a593Smuzhiyun 	struct device *dev = &adapter->pdev->dev;
5646*4882a593Smuzhiyun 	int status = 0;
5647*4882a593Smuzhiyun 
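	/* Allocate 16 extra bytes so the mailbox used for FW commands can be
	 * placed on a 16-byte boundary within this buffer (see the PTR_ALIGN
	 * calls below).
	 */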
5648*4882a593Smuzhiyun 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
5649*4882a593Smuzhiyun 	mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
5650*4882a593Smuzhiyun 						&mbox_mem_alloc->dma,
5651*4882a593Smuzhiyun 						GFP_KERNEL);
5652*4882a593Smuzhiyun 	if (!mbox_mem_alloc->va)
5653*4882a593Smuzhiyun 		return -ENOMEM;
5654*4882a593Smuzhiyun 
5655*4882a593Smuzhiyun 	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
5656*4882a593Smuzhiyun 	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
5657*4882a593Smuzhiyun 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
5658*4882a593Smuzhiyun 
5659*4882a593Smuzhiyun 	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
5660*4882a593Smuzhiyun 	rx_filter->va = dma_alloc_coherent(dev, rx_filter->size,
5661*4882a593Smuzhiyun 					   &rx_filter->dma, GFP_KERNEL);
5662*4882a593Smuzhiyun 	if (!rx_filter->va) {
5663*4882a593Smuzhiyun 		status = -ENOMEM;
5664*4882a593Smuzhiyun 		goto free_mbox;
5665*4882a593Smuzhiyun 	}
5666*4882a593Smuzhiyun 
5667*4882a593Smuzhiyun 	if (lancer_chip(adapter))
5668*4882a593Smuzhiyun 		stats_cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
5669*4882a593Smuzhiyun 	else if (BE2_chip(adapter))
5670*4882a593Smuzhiyun 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
5671*4882a593Smuzhiyun 	else if (BE3_chip(adapter))
5672*4882a593Smuzhiyun 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
5673*4882a593Smuzhiyun 	else
5674*4882a593Smuzhiyun 		stats_cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
5675*4882a593Smuzhiyun 	stats_cmd->va = dma_alloc_coherent(dev, stats_cmd->size,
5676*4882a593Smuzhiyun 					   &stats_cmd->dma, GFP_KERNEL);
5677*4882a593Smuzhiyun 	if (!stats_cmd->va) {
5678*4882a593Smuzhiyun 		status = -ENOMEM;
5679*4882a593Smuzhiyun 		goto free_rx_filter;
5680*4882a593Smuzhiyun 	}
5681*4882a593Smuzhiyun 
5682*4882a593Smuzhiyun 	mutex_init(&adapter->mbox_lock);
5683*4882a593Smuzhiyun 	mutex_init(&adapter->mcc_lock);
5684*4882a593Smuzhiyun 	mutex_init(&adapter->rx_filter_lock);
5685*4882a593Smuzhiyun 	spin_lock_init(&adapter->mcc_cq_lock);
5686*4882a593Smuzhiyun 	init_completion(&adapter->et_cmd_compl);
5687*4882a593Smuzhiyun 
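	/* Save the PCI config space now so it can be restored after an EEH
	 * slot reset (see be_eeh_reset()).
	 */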
5688*4882a593Smuzhiyun 	pci_save_state(adapter->pdev);
5689*4882a593Smuzhiyun 
5690*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&adapter->work, be_worker);
5691*4882a593Smuzhiyun 
5692*4882a593Smuzhiyun 	adapter->error_recovery.recovery_state = ERR_RECOVERY_ST_NONE;
5693*4882a593Smuzhiyun 	adapter->error_recovery.resched_delay = 0;
5694*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&adapter->error_recovery.err_detection_work,
5695*4882a593Smuzhiyun 			  be_err_detection_task);
5696*4882a593Smuzhiyun 
5697*4882a593Smuzhiyun 	adapter->rx_fc = true;
5698*4882a593Smuzhiyun 	adapter->tx_fc = true;
5699*4882a593Smuzhiyun 
5700*4882a593Smuzhiyun 	/* Must be a power of 2 or else MODULO will BUG_ON */
5701*4882a593Smuzhiyun 	adapter->be_get_temp_freq = 64;
5702*4882a593Smuzhiyun 
5703*4882a593Smuzhiyun 	return 0;
5704*4882a593Smuzhiyun 
5705*4882a593Smuzhiyun free_rx_filter:
5706*4882a593Smuzhiyun 	dma_free_coherent(dev, rx_filter->size, rx_filter->va, rx_filter->dma);
5707*4882a593Smuzhiyun free_mbox:
5708*4882a593Smuzhiyun 	dma_free_coherent(dev, mbox_mem_alloc->size, mbox_mem_alloc->va,
5709*4882a593Smuzhiyun 			  mbox_mem_alloc->dma);
5710*4882a593Smuzhiyun 	return status;
5711*4882a593Smuzhiyun }
5712*4882a593Smuzhiyun 
5713*4882a593Smuzhiyun static void be_remove(struct pci_dev *pdev)
5714*4882a593Smuzhiyun {
5715*4882a593Smuzhiyun 	struct be_adapter *adapter = pci_get_drvdata(pdev);
5716*4882a593Smuzhiyun 
5717*4882a593Smuzhiyun 	if (!adapter)
5718*4882a593Smuzhiyun 		return;
5719*4882a593Smuzhiyun 
5720*4882a593Smuzhiyun 	be_roce_dev_remove(adapter);
5721*4882a593Smuzhiyun 	be_intr_set(adapter, false);
5722*4882a593Smuzhiyun 
5723*4882a593Smuzhiyun 	be_cancel_err_detection(adapter);
5724*4882a593Smuzhiyun 
5725*4882a593Smuzhiyun 	unregister_netdev(adapter->netdev);
5726*4882a593Smuzhiyun 
5727*4882a593Smuzhiyun 	be_clear(adapter);
5728*4882a593Smuzhiyun 
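	/* Avoid resetting the function while any VFs are still assigned to
	 * guests.
	 */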
5729*4882a593Smuzhiyun 	if (!pci_vfs_assigned(adapter->pdev))
5730*4882a593Smuzhiyun 		be_cmd_reset_function(adapter);
5731*4882a593Smuzhiyun 
5732*4882a593Smuzhiyun 	/* tell fw we're done with firing cmds */
5733*4882a593Smuzhiyun 	be_cmd_fw_clean(adapter);
5734*4882a593Smuzhiyun 
5735*4882a593Smuzhiyun 	be_unmap_pci_bars(adapter);
5736*4882a593Smuzhiyun 	be_drv_cleanup(adapter);
5737*4882a593Smuzhiyun 
5738*4882a593Smuzhiyun 	pci_disable_pcie_error_reporting(pdev);
5739*4882a593Smuzhiyun 
5740*4882a593Smuzhiyun 	pci_release_regions(pdev);
5741*4882a593Smuzhiyun 	pci_disable_device(pdev);
5742*4882a593Smuzhiyun 
5743*4882a593Smuzhiyun 	free_netdev(adapter->netdev);
5744*4882a593Smuzhiyun }
5745*4882a593Smuzhiyun 
5746*4882a593Smuzhiyun static ssize_t be_hwmon_show_temp(struct device *dev,
5747*4882a593Smuzhiyun 				  struct device_attribute *dev_attr,
5748*4882a593Smuzhiyun 				  char *buf)
5749*4882a593Smuzhiyun {
5750*4882a593Smuzhiyun 	struct be_adapter *adapter = dev_get_drvdata(dev);
5751*4882a593Smuzhiyun 
5752*4882a593Smuzhiyun 	/* Unit: millidegree Celsius */
5753*4882a593Smuzhiyun 	if (adapter->hwmon_info.be_on_die_temp == BE_INVALID_DIE_TEMP)
5754*4882a593Smuzhiyun 		return -EIO;
5755*4882a593Smuzhiyun 	else
5756*4882a593Smuzhiyun 		return sprintf(buf, "%u\n",
5757*4882a593Smuzhiyun 			       adapter->hwmon_info.be_on_die_temp * 1000);
5758*4882a593Smuzhiyun }
5759*4882a593Smuzhiyun 
5760*4882a593Smuzhiyun static SENSOR_DEVICE_ATTR(temp1_input, 0444,
5761*4882a593Smuzhiyun 			  be_hwmon_show_temp, NULL, 1);
5762*4882a593Smuzhiyun 
5763*4882a593Smuzhiyun static struct attribute *be_hwmon_attrs[] = {
5764*4882a593Smuzhiyun 	&sensor_dev_attr_temp1_input.dev_attr.attr,
5765*4882a593Smuzhiyun 	NULL
5766*4882a593Smuzhiyun };
5767*4882a593Smuzhiyun 
5768*4882a593Smuzhiyun ATTRIBUTE_GROUPS(be_hwmon);
5769*4882a593Smuzhiyun 
5770*4882a593Smuzhiyun static char *mc_name(struct be_adapter *adapter)
5771*4882a593Smuzhiyun {
5772*4882a593Smuzhiyun 	char *str = "";	/* default */
5773*4882a593Smuzhiyun 
5774*4882a593Smuzhiyun 	switch (adapter->mc_type) {
5775*4882a593Smuzhiyun 	case UMC:
5776*4882a593Smuzhiyun 		str = "UMC";
5777*4882a593Smuzhiyun 		break;
5778*4882a593Smuzhiyun 	case FLEX10:
5779*4882a593Smuzhiyun 		str = "FLEX10";
5780*4882a593Smuzhiyun 		break;
5781*4882a593Smuzhiyun 	case vNIC1:
5782*4882a593Smuzhiyun 		str = "vNIC-1";
5783*4882a593Smuzhiyun 		break;
5784*4882a593Smuzhiyun 	case nPAR:
5785*4882a593Smuzhiyun 		str = "nPAR";
5786*4882a593Smuzhiyun 		break;
5787*4882a593Smuzhiyun 	case UFP:
5788*4882a593Smuzhiyun 		str = "UFP";
5789*4882a593Smuzhiyun 		break;
5790*4882a593Smuzhiyun 	case vNIC2:
5791*4882a593Smuzhiyun 		str = "vNIC-2";
5792*4882a593Smuzhiyun 		break;
5793*4882a593Smuzhiyun 	default:
5794*4882a593Smuzhiyun 		str = "";
5795*4882a593Smuzhiyun 	}
5796*4882a593Smuzhiyun 
5797*4882a593Smuzhiyun 	return str;
5798*4882a593Smuzhiyun }
5799*4882a593Smuzhiyun 
5800*4882a593Smuzhiyun static inline char *func_name(struct be_adapter *adapter)
5801*4882a593Smuzhiyun {
5802*4882a593Smuzhiyun 	return be_physfn(adapter) ? "PF" : "VF";
5803*4882a593Smuzhiyun }
5804*4882a593Smuzhiyun 
5805*4882a593Smuzhiyun static inline char *nic_name(struct pci_dev *pdev)
5806*4882a593Smuzhiyun {
5807*4882a593Smuzhiyun 	switch (pdev->device) {
5808*4882a593Smuzhiyun 	case OC_DEVICE_ID1:
5809*4882a593Smuzhiyun 		return OC_NAME;
5810*4882a593Smuzhiyun 	case OC_DEVICE_ID2:
5811*4882a593Smuzhiyun 		return OC_NAME_BE;
5812*4882a593Smuzhiyun 	case OC_DEVICE_ID3:
5813*4882a593Smuzhiyun 	case OC_DEVICE_ID4:
5814*4882a593Smuzhiyun 		return OC_NAME_LANCER;
5815*4882a593Smuzhiyun 	case BE_DEVICE_ID2:
5816*4882a593Smuzhiyun 		return BE3_NAME;
5817*4882a593Smuzhiyun 	case OC_DEVICE_ID5:
5818*4882a593Smuzhiyun 	case OC_DEVICE_ID6:
5819*4882a593Smuzhiyun 		return OC_NAME_SH;
5820*4882a593Smuzhiyun 	default:
5821*4882a593Smuzhiyun 		return BE_NAME;
5822*4882a593Smuzhiyun 	}
5823*4882a593Smuzhiyun }
5824*4882a593Smuzhiyun 
5825*4882a593Smuzhiyun static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
5826*4882a593Smuzhiyun {
5827*4882a593Smuzhiyun 	struct be_adapter *adapter;
5828*4882a593Smuzhiyun 	struct net_device *netdev;
5829*4882a593Smuzhiyun 	int status = 0;
5830*4882a593Smuzhiyun 
5831*4882a593Smuzhiyun 	status = pci_enable_device(pdev);
5832*4882a593Smuzhiyun 	if (status)
5833*4882a593Smuzhiyun 		goto do_none;
5834*4882a593Smuzhiyun 
5835*4882a593Smuzhiyun 	status = pci_request_regions(pdev, DRV_NAME);
5836*4882a593Smuzhiyun 	if (status)
5837*4882a593Smuzhiyun 		goto disable_dev;
5838*4882a593Smuzhiyun 	pci_set_master(pdev);
5839*4882a593Smuzhiyun 
5840*4882a593Smuzhiyun 	netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
5841*4882a593Smuzhiyun 	if (!netdev) {
5842*4882a593Smuzhiyun 		status = -ENOMEM;
5843*4882a593Smuzhiyun 		goto rel_reg;
5844*4882a593Smuzhiyun 	}
5845*4882a593Smuzhiyun 	adapter = netdev_priv(netdev);
5846*4882a593Smuzhiyun 	adapter->pdev = pdev;
5847*4882a593Smuzhiyun 	pci_set_drvdata(pdev, adapter);
5848*4882a593Smuzhiyun 	adapter->netdev = netdev;
5849*4882a593Smuzhiyun 	SET_NETDEV_DEV(netdev, &pdev->dev);
5850*4882a593Smuzhiyun 
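	/* Prefer 64-bit DMA addressing and advertise NETIF_F_HIGHDMA when it
	 * is available; otherwise fall back to a 32-bit DMA mask.
	 */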
5851*4882a593Smuzhiyun 	status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
5852*4882a593Smuzhiyun 	if (!status) {
5853*4882a593Smuzhiyun 		netdev->features |= NETIF_F_HIGHDMA;
5854*4882a593Smuzhiyun 	} else {
5855*4882a593Smuzhiyun 		status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
5856*4882a593Smuzhiyun 		if (status) {
5857*4882a593Smuzhiyun 			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
5858*4882a593Smuzhiyun 			goto free_netdev;
5859*4882a593Smuzhiyun 		}
5860*4882a593Smuzhiyun 	}
5861*4882a593Smuzhiyun 
5862*4882a593Smuzhiyun 	status = pci_enable_pcie_error_reporting(pdev);
5863*4882a593Smuzhiyun 	if (!status)
5864*4882a593Smuzhiyun 		dev_info(&pdev->dev, "PCIe error reporting enabled\n");
5865*4882a593Smuzhiyun 
5866*4882a593Smuzhiyun 	status = be_map_pci_bars(adapter);
5867*4882a593Smuzhiyun 	if (status)
5868*4882a593Smuzhiyun 		goto free_netdev;
5869*4882a593Smuzhiyun 
5870*4882a593Smuzhiyun 	status = be_drv_init(adapter);
5871*4882a593Smuzhiyun 	if (status)
5872*4882a593Smuzhiyun 		goto unmap_bars;
5873*4882a593Smuzhiyun 
5874*4882a593Smuzhiyun 	status = be_setup(adapter);
5875*4882a593Smuzhiyun 	if (status)
5876*4882a593Smuzhiyun 		goto drv_cleanup;
5877*4882a593Smuzhiyun 
5878*4882a593Smuzhiyun 	be_netdev_init(netdev);
5879*4882a593Smuzhiyun 	status = register_netdev(netdev);
5880*4882a593Smuzhiyun 	if (status != 0)
5881*4882a593Smuzhiyun 		goto unsetup;
5882*4882a593Smuzhiyun 
5883*4882a593Smuzhiyun 	be_roce_dev_add(adapter);
5884*4882a593Smuzhiyun 
5885*4882a593Smuzhiyun 	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5886*4882a593Smuzhiyun 	adapter->error_recovery.probe_time = jiffies;
5887*4882a593Smuzhiyun 
5888*4882a593Smuzhiyun 	/* On-die temperature reporting is not supported on VFs. */
5889*4882a593Smuzhiyun 	if (be_physfn(adapter) && IS_ENABLED(CONFIG_BE2NET_HWMON)) {
5890*4882a593Smuzhiyun 		adapter->hwmon_info.hwmon_dev =
5891*4882a593Smuzhiyun 			devm_hwmon_device_register_with_groups(&pdev->dev,
5892*4882a593Smuzhiyun 							       DRV_NAME,
5893*4882a593Smuzhiyun 							       adapter,
5894*4882a593Smuzhiyun 							       be_hwmon_groups);
5895*4882a593Smuzhiyun 		adapter->hwmon_info.be_on_die_temp = BE_INVALID_DIE_TEMP;
5896*4882a593Smuzhiyun 	}
5897*4882a593Smuzhiyun 
5898*4882a593Smuzhiyun 	dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
5899*4882a593Smuzhiyun 		 func_name(adapter), mc_name(adapter), adapter->port_name);
5900*4882a593Smuzhiyun 
5901*4882a593Smuzhiyun 	return 0;
5902*4882a593Smuzhiyun 
5903*4882a593Smuzhiyun unsetup:
5904*4882a593Smuzhiyun 	be_clear(adapter);
5905*4882a593Smuzhiyun drv_cleanup:
5906*4882a593Smuzhiyun 	be_drv_cleanup(adapter);
5907*4882a593Smuzhiyun unmap_bars:
5908*4882a593Smuzhiyun 	be_unmap_pci_bars(adapter);
5909*4882a593Smuzhiyun free_netdev:
5910*4882a593Smuzhiyun 	pci_disable_pcie_error_reporting(pdev);
5911*4882a593Smuzhiyun 	free_netdev(netdev);
5912*4882a593Smuzhiyun rel_reg:
5913*4882a593Smuzhiyun 	pci_release_regions(pdev);
5914*4882a593Smuzhiyun disable_dev:
5915*4882a593Smuzhiyun 	pci_disable_device(pdev);
5916*4882a593Smuzhiyun do_none:
5917*4882a593Smuzhiyun 	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
5918*4882a593Smuzhiyun 	return status;
5919*4882a593Smuzhiyun }
5920*4882a593Smuzhiyun 
5921*4882a593Smuzhiyun static int __maybe_unused be_suspend(struct device *dev_d)
5922*4882a593Smuzhiyun {
5923*4882a593Smuzhiyun 	struct be_adapter *adapter = dev_get_drvdata(dev_d);
5924*4882a593Smuzhiyun 
5925*4882a593Smuzhiyun 	be_intr_set(adapter, false);
5926*4882a593Smuzhiyun 	be_cancel_err_detection(adapter);
5927*4882a593Smuzhiyun 
5928*4882a593Smuzhiyun 	be_cleanup(adapter);
5929*4882a593Smuzhiyun 
5930*4882a593Smuzhiyun 	return 0;
5931*4882a593Smuzhiyun }
5932*4882a593Smuzhiyun 
5933*4882a593Smuzhiyun static int __maybe_unused be_pci_resume(struct device *dev_d)
5934*4882a593Smuzhiyun {
5935*4882a593Smuzhiyun 	struct be_adapter *adapter = dev_get_drvdata(dev_d);
5936*4882a593Smuzhiyun 	int status = 0;
5937*4882a593Smuzhiyun 
5938*4882a593Smuzhiyun 	status = be_resume(adapter);
5939*4882a593Smuzhiyun 	if (status)
5940*4882a593Smuzhiyun 		return status;
5941*4882a593Smuzhiyun 
5942*4882a593Smuzhiyun 	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
5943*4882a593Smuzhiyun 
5944*4882a593Smuzhiyun 	return 0;
5945*4882a593Smuzhiyun }
5946*4882a593Smuzhiyun 
5947*4882a593Smuzhiyun /*
5948*4882a593Smuzhiyun  * An FLR will stop BE from DMAing any data.
5949*4882a593Smuzhiyun  */
5950*4882a593Smuzhiyun static void be_shutdown(struct pci_dev *pdev)
5951*4882a593Smuzhiyun {
5952*4882a593Smuzhiyun 	struct be_adapter *adapter = pci_get_drvdata(pdev);
5953*4882a593Smuzhiyun 
5954*4882a593Smuzhiyun 	if (!adapter)
5955*4882a593Smuzhiyun 		return;
5956*4882a593Smuzhiyun 
5957*4882a593Smuzhiyun 	be_roce_dev_shutdown(adapter);
5958*4882a593Smuzhiyun 	cancel_delayed_work_sync(&adapter->work);
5959*4882a593Smuzhiyun 	be_cancel_err_detection(adapter);
5960*4882a593Smuzhiyun 
5961*4882a593Smuzhiyun 	netif_device_detach(adapter->netdev);
5962*4882a593Smuzhiyun 
5963*4882a593Smuzhiyun 	be_cmd_reset_function(adapter);
5964*4882a593Smuzhiyun 
5965*4882a593Smuzhiyun 	pci_disable_device(pdev);
5966*4882a593Smuzhiyun }
5967*4882a593Smuzhiyun 
5968*4882a593Smuzhiyun static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
5969*4882a593Smuzhiyun 					    pci_channel_state_t state)
5970*4882a593Smuzhiyun {
5971*4882a593Smuzhiyun 	struct be_adapter *adapter = pci_get_drvdata(pdev);
5972*4882a593Smuzhiyun 
5973*4882a593Smuzhiyun 	dev_err(&adapter->pdev->dev, "EEH error detected\n");
5974*4882a593Smuzhiyun 
5975*4882a593Smuzhiyun 	be_roce_dev_remove(adapter);
5976*4882a593Smuzhiyun 
5977*4882a593Smuzhiyun 	if (!be_check_error(adapter, BE_ERROR_EEH)) {
5978*4882a593Smuzhiyun 		be_set_error(adapter, BE_ERROR_EEH);
5979*4882a593Smuzhiyun 
5980*4882a593Smuzhiyun 		be_cancel_err_detection(adapter);
5981*4882a593Smuzhiyun 
5982*4882a593Smuzhiyun 		be_cleanup(adapter);
5983*4882a593Smuzhiyun 	}
5984*4882a593Smuzhiyun 
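	/* A permanent channel failure cannot be recovered from; ask the PCI
	 * core to disconnect the device instead of attempting a reset.
	 */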
5985*4882a593Smuzhiyun 	if (state == pci_channel_io_perm_failure)
5986*4882a593Smuzhiyun 		return PCI_ERS_RESULT_DISCONNECT;
5987*4882a593Smuzhiyun 
5988*4882a593Smuzhiyun 	pci_disable_device(pdev);
5989*4882a593Smuzhiyun 
5990*4882a593Smuzhiyun 	/* The error could cause the FW to trigger a flash debug dump.
5991*4882a593Smuzhiyun 	 * Resetting the card while the flash dump is in progress
5992*4882a593Smuzhiyun 	 * can cause it not to recover, so wait for the dump to finish.
5993*4882a593Smuzhiyun 	 * Wait only on the first function, as the wait is needed only
5994*4882a593Smuzhiyun 	 * once per adapter.
5995*4882a593Smuzhiyun 	 */
5996*4882a593Smuzhiyun 	if (pdev->devfn == 0)
5997*4882a593Smuzhiyun 		ssleep(30);
5998*4882a593Smuzhiyun 
5999*4882a593Smuzhiyun 	return PCI_ERS_RESULT_NEED_RESET;
6000*4882a593Smuzhiyun }
6001*4882a593Smuzhiyun 
6002*4882a593Smuzhiyun static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
6003*4882a593Smuzhiyun {
6004*4882a593Smuzhiyun 	struct be_adapter *adapter = pci_get_drvdata(pdev);
6005*4882a593Smuzhiyun 	int status;
6006*4882a593Smuzhiyun 
6007*4882a593Smuzhiyun 	dev_info(&adapter->pdev->dev, "EEH reset\n");
6008*4882a593Smuzhiyun 
6009*4882a593Smuzhiyun 	status = pci_enable_device(pdev);
6010*4882a593Smuzhiyun 	if (status)
6011*4882a593Smuzhiyun 		return PCI_ERS_RESULT_DISCONNECT;
6012*4882a593Smuzhiyun 
6013*4882a593Smuzhiyun 	pci_set_master(pdev);
6014*4882a593Smuzhiyun 	pci_restore_state(pdev);
6015*4882a593Smuzhiyun 
6016*4882a593Smuzhiyun 	/* Check if card is ok and fw is ready */
6017*4882a593Smuzhiyun 	dev_info(&adapter->pdev->dev,
6018*4882a593Smuzhiyun 		 "Waiting for FW to be ready after EEH reset\n");
6019*4882a593Smuzhiyun 	status = be_fw_wait_ready(adapter);
6020*4882a593Smuzhiyun 	if (status)
6021*4882a593Smuzhiyun 		return PCI_ERS_RESULT_DISCONNECT;
6022*4882a593Smuzhiyun 
6023*4882a593Smuzhiyun 	be_clear_error(adapter, BE_CLEAR_ALL);
6024*4882a593Smuzhiyun 	return PCI_ERS_RESULT_RECOVERED;
6025*4882a593Smuzhiyun }
6026*4882a593Smuzhiyun 
6027*4882a593Smuzhiyun static void be_eeh_resume(struct pci_dev *pdev)
6028*4882a593Smuzhiyun {
6029*4882a593Smuzhiyun 	int status = 0;
6030*4882a593Smuzhiyun 	struct be_adapter *adapter = pci_get_drvdata(pdev);
6031*4882a593Smuzhiyun 
6032*4882a593Smuzhiyun 	dev_info(&adapter->pdev->dev, "EEH resume\n");
6033*4882a593Smuzhiyun 
6034*4882a593Smuzhiyun 	pci_save_state(pdev);
6035*4882a593Smuzhiyun 
6036*4882a593Smuzhiyun 	status = be_resume(adapter);
6037*4882a593Smuzhiyun 	if (status)
6038*4882a593Smuzhiyun 		goto err;
6039*4882a593Smuzhiyun 
6040*4882a593Smuzhiyun 	be_roce_dev_add(adapter);
6041*4882a593Smuzhiyun 
6042*4882a593Smuzhiyun 	be_schedule_err_detection(adapter, ERR_DETECTION_DELAY);
6043*4882a593Smuzhiyun 	return;
6044*4882a593Smuzhiyun err:
6045*4882a593Smuzhiyun 	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
6046*4882a593Smuzhiyun }
6047*4882a593Smuzhiyun 
6048*4882a593Smuzhiyun static int be_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
6049*4882a593Smuzhiyun {
6050*4882a593Smuzhiyun 	struct be_adapter *adapter = pci_get_drvdata(pdev);
6051*4882a593Smuzhiyun 	struct be_resources vft_res = {0};
6052*4882a593Smuzhiyun 	int status;
6053*4882a593Smuzhiyun 
6054*4882a593Smuzhiyun 	if (!num_vfs)
6055*4882a593Smuzhiyun 		be_vf_clear(adapter);
6056*4882a593Smuzhiyun 
6057*4882a593Smuzhiyun 	adapter->num_vfs = num_vfs;
6058*4882a593Smuzhiyun 
6059*4882a593Smuzhiyun 	if (adapter->num_vfs == 0 && pci_vfs_assigned(pdev)) {
6060*4882a593Smuzhiyun 		dev_warn(&pdev->dev,
6061*4882a593Smuzhiyun 			 "Cannot disable VFs while they are assigned\n");
6062*4882a593Smuzhiyun 		return -EBUSY;
6063*4882a593Smuzhiyun 	}
6064*4882a593Smuzhiyun 
6065*4882a593Smuzhiyun 	/* When the HW is in an SR-IOV capable configuration, the PF-pool
6066*4882a593Smuzhiyun 	 * resources are distributed equally across the maximum number of VFs.
6067*4882a593Smuzhiyun 	 * The user may request that only a subset of the max VFs be enabled.
6068*4882a593Smuzhiyun 	 * Based on num_vfs, redistribute the resources across num_vfs so that
6069*4882a593Smuzhiyun 	 * each enabled VF gets a larger share.
6070*4882a593Smuzhiyun 	 * This facility is not available in BE3 FW; on Lancer chips the FW
6071*4882a593Smuzhiyun 	 * performs this redistribution itself.
6072*4882a593Smuzhiyun 	 */
6073*4882a593Smuzhiyun 	if (skyhawk_chip(adapter) && !pci_num_vf(pdev)) {
6074*4882a593Smuzhiyun 		be_calculate_vf_res(adapter, adapter->num_vfs,
6075*4882a593Smuzhiyun 				    &vft_res);
6076*4882a593Smuzhiyun 		status = be_cmd_set_sriov_config(adapter, adapter->pool_res,
6077*4882a593Smuzhiyun 						 adapter->num_vfs, &vft_res);
6078*4882a593Smuzhiyun 		if (status)
6079*4882a593Smuzhiyun 			dev_err(&pdev->dev,
6080*4882a593Smuzhiyun 				"Failed to optimize SR-IOV resources\n");
6081*4882a593Smuzhiyun 	}
6082*4882a593Smuzhiyun 
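	/* Re-query the resources now available to this function before
	 * resizing the queues below.
	 */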
6083*4882a593Smuzhiyun 	status = be_get_resources(adapter);
6084*4882a593Smuzhiyun 	if (status)
6085*4882a593Smuzhiyun 		return be_cmd_status(status);
6086*4882a593Smuzhiyun 
6087*4882a593Smuzhiyun 	/* Updating real_num_tx/rx_queues() requires rtnl_lock() */
6088*4882a593Smuzhiyun 	rtnl_lock();
6089*4882a593Smuzhiyun 	status = be_update_queues(adapter);
6090*4882a593Smuzhiyun 	rtnl_unlock();
6091*4882a593Smuzhiyun 	if (status)
6092*4882a593Smuzhiyun 		return be_cmd_status(status);
6093*4882a593Smuzhiyun 
6094*4882a593Smuzhiyun 	if (adapter->num_vfs)
6095*4882a593Smuzhiyun 		status = be_vf_setup(adapter);
6096*4882a593Smuzhiyun 
6097*4882a593Smuzhiyun 	if (!status)
6098*4882a593Smuzhiyun 		return adapter->num_vfs;
6099*4882a593Smuzhiyun 
6100*4882a593Smuzhiyun 	return 0;
6101*4882a593Smuzhiyun }
6102*4882a593Smuzhiyun 
6103*4882a593Smuzhiyun static const struct pci_error_handlers be_eeh_handlers = {
6104*4882a593Smuzhiyun 	.error_detected = be_eeh_err_detected,
6105*4882a593Smuzhiyun 	.slot_reset = be_eeh_reset,
6106*4882a593Smuzhiyun 	.resume = be_eeh_resume,
6107*4882a593Smuzhiyun };
6108*4882a593Smuzhiyun 
6109*4882a593Smuzhiyun static SIMPLE_DEV_PM_OPS(be_pci_pm_ops, be_suspend, be_pci_resume);
6110*4882a593Smuzhiyun 
6111*4882a593Smuzhiyun static struct pci_driver be_driver = {
6112*4882a593Smuzhiyun 	.name = DRV_NAME,
6113*4882a593Smuzhiyun 	.id_table = be_dev_ids,
6114*4882a593Smuzhiyun 	.probe = be_probe,
6115*4882a593Smuzhiyun 	.remove = be_remove,
6116*4882a593Smuzhiyun 	.driver.pm = &be_pci_pm_ops,
6117*4882a593Smuzhiyun 	.shutdown = be_shutdown,
6118*4882a593Smuzhiyun 	.sriov_configure = be_pci_sriov_configure,
6119*4882a593Smuzhiyun 	.err_handler = &be_eeh_handlers
6120*4882a593Smuzhiyun };
6121*4882a593Smuzhiyun 
6122*4882a593Smuzhiyun static int __init be_init_module(void)
6123*4882a593Smuzhiyun {
6124*4882a593Smuzhiyun 	int status;
6125*4882a593Smuzhiyun 
6126*4882a593Smuzhiyun 	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
6127*4882a593Smuzhiyun 	    rx_frag_size != 2048) {
6128*4882a593Smuzhiyun 		printk(KERN_WARNING DRV_NAME
6129*4882a593Smuzhiyun 			" : Module param rx_frag_size must be 2048/4096/8192."
6130*4882a593Smuzhiyun 			" Using 2048\n");
6131*4882a593Smuzhiyun 		rx_frag_size = 2048;
6132*4882a593Smuzhiyun 	}
6133*4882a593Smuzhiyun 
6134*4882a593Smuzhiyun 	if (num_vfs > 0) {
6135*4882a593Smuzhiyun 		pr_info(DRV_NAME " : Module param num_vfs is obsolete.\n");
6136*4882a593Smuzhiyun 		pr_info(DRV_NAME " : Use sysfs method to enable VFs\n");
6137*4882a593Smuzhiyun 	}
6138*4882a593Smuzhiyun 
6139*4882a593Smuzhiyun 	be_wq = create_singlethread_workqueue("be_wq");
6140*4882a593Smuzhiyun 	if (!be_wq) {
6141*4882a593Smuzhiyun 		pr_warn(DRV_NAME " : workqueue creation failed\n");
6142*4882a593Smuzhiyun 		return -1;
6143*4882a593Smuzhiyun 	}
6144*4882a593Smuzhiyun 
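	/* Failure to create the recovery workqueue is not fatal here; callers
	 * of be_schedule_err_detection() are expected to cope with it being
	 * absent.
	 */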
6145*4882a593Smuzhiyun 	be_err_recovery_workq =
6146*4882a593Smuzhiyun 		create_singlethread_workqueue("be_err_recover");
6147*4882a593Smuzhiyun 	if (!be_err_recovery_workq)
6148*4882a593Smuzhiyun 		pr_warn(DRV_NAME " : Could not create error recovery workqueue\n");
6149*4882a593Smuzhiyun 
6150*4882a593Smuzhiyun 	status = pci_register_driver(&be_driver);
6151*4882a593Smuzhiyun 	if (status) {
6152*4882a593Smuzhiyun 		destroy_workqueue(be_wq);
6153*4882a593Smuzhiyun 		be_destroy_err_recovery_workq();
6154*4882a593Smuzhiyun 	}
6155*4882a593Smuzhiyun 	return status;
6156*4882a593Smuzhiyun }
6157*4882a593Smuzhiyun module_init(be_init_module);
6158*4882a593Smuzhiyun 
6159*4882a593Smuzhiyun static void __exit be_exit_module(void)
6160*4882a593Smuzhiyun {
6161*4882a593Smuzhiyun 	pci_unregister_driver(&be_driver);
6162*4882a593Smuzhiyun 
6163*4882a593Smuzhiyun 	be_destroy_err_recovery_workq();
6164*4882a593Smuzhiyun 
6165*4882a593Smuzhiyun 	if (be_wq)
6166*4882a593Smuzhiyun 		destroy_workqueue(be_wq);
6167*4882a593Smuzhiyun }
6168*4882a593Smuzhiyun module_exit(be_exit_module);
6169