xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* bnx2x_cmn.c: QLogic Everest network driver.
2*4882a593Smuzhiyun  *
3*4882a593Smuzhiyun  * Copyright (c) 2007-2013 Broadcom Corporation
4*4882a593Smuzhiyun  * Copyright (c) 2014 QLogic Corporation
5*4882a593Smuzhiyun  * All rights reserved
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * This program is free software; you can redistribute it and/or modify
8*4882a593Smuzhiyun  * it under the terms of the GNU General Public License as published by
9*4882a593Smuzhiyun  * the Free Software Foundation.
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
12*4882a593Smuzhiyun  * Written by: Eliezer Tamir
13*4882a593Smuzhiyun  * Based on code from Michael Chan's bnx2 driver
14*4882a593Smuzhiyun  * UDP CSUM errata workaround by Arik Gendelman
15*4882a593Smuzhiyun  * Slowpath and fastpath rework by Vladislav Zolotarov
16*4882a593Smuzhiyun  * Statistics and Link management by Yitchak Gertner
17*4882a593Smuzhiyun  *
18*4882a593Smuzhiyun  */
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun #include <linux/etherdevice.h>
23*4882a593Smuzhiyun #include <linux/if_vlan.h>
24*4882a593Smuzhiyun #include <linux/interrupt.h>
25*4882a593Smuzhiyun #include <linux/ip.h>
26*4882a593Smuzhiyun #include <linux/crash_dump.h>
27*4882a593Smuzhiyun #include <net/tcp.h>
28*4882a593Smuzhiyun #include <net/ipv6.h>
29*4882a593Smuzhiyun #include <net/ip6_checksum.h>
30*4882a593Smuzhiyun #include <linux/prefetch.h>
31*4882a593Smuzhiyun #include "bnx2x_cmn.h"
32*4882a593Smuzhiyun #include "bnx2x_init.h"
33*4882a593Smuzhiyun #include "bnx2x_sp.h"
34*4882a593Smuzhiyun 
35*4882a593Smuzhiyun static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
36*4882a593Smuzhiyun static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
37*4882a593Smuzhiyun static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
38*4882a593Smuzhiyun static int bnx2x_poll(struct napi_struct *napi, int budget);
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun 	int i;
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun 	/* Add NAPI objects */
45*4882a593Smuzhiyun 	for_each_rx_queue_cnic(bp, i) {
46*4882a593Smuzhiyun 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
47*4882a593Smuzhiyun 			       bnx2x_poll, NAPI_POLL_WEIGHT);
48*4882a593Smuzhiyun 	}
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun static void bnx2x_add_all_napi(struct bnx2x *bp)
52*4882a593Smuzhiyun {
53*4882a593Smuzhiyun 	int i;
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun 	/* Add NAPI objects */
56*4882a593Smuzhiyun 	for_each_eth_queue(bp, i) {
57*4882a593Smuzhiyun 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
58*4882a593Smuzhiyun 			       bnx2x_poll, NAPI_POLL_WEIGHT);
59*4882a593Smuzhiyun 	}
60*4882a593Smuzhiyun }
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun static int bnx2x_calc_num_queues(struct bnx2x *bp)
63*4882a593Smuzhiyun {
64*4882a593Smuzhiyun 	int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
65*4882a593Smuzhiyun 
66*4882a593Smuzhiyun 	/* Reduce memory usage in kdump environment by using only one queue */
67*4882a593Smuzhiyun 	if (is_kdump_kernel())
68*4882a593Smuzhiyun 		nq = 1;
69*4882a593Smuzhiyun 
70*4882a593Smuzhiyun 	nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
71*4882a593Smuzhiyun 	return nq;
72*4882a593Smuzhiyun }
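
/* Editor's note (illustrative sketch, not part of the driver): the expression
 * "bnx2x_num_queues ? : netif_get_num_default_rss_queues()" above uses the GCC
 * conditional-with-omitted-middle-operand extension: take the module parameter
 * when it is non-zero, otherwise fall back to the kernel's default RSS queue
 * count, then force a single queue under kdump and clamp into a sane range.
 * The standalone helper below restates that policy; the names "requested",
 * "hw_max" and "in_kdump" are invented for the example.
 */
#if 0	/* standalone sketch, intentionally compiled out */
static int example_pick_num_queues(int requested, int hw_max, bool in_kdump)
{
	int nq = requested ? requested : 8;	/* 0 means "use a default" */

	if (in_kdump)		/* keep the crash kernel's footprint small */
		nq = 1;

	if (nq < 1)		/* at least one queue ... */
		nq = 1;
	if (nq > hw_max)	/* ... and never more than the device supports */
		nq = hw_max;
	return nq;
}
#endif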
73*4882a593Smuzhiyun 
74*4882a593Smuzhiyun /**
75*4882a593Smuzhiyun  * bnx2x_move_fp - move content of the fastpath structure.
76*4882a593Smuzhiyun  *
77*4882a593Smuzhiyun  * @bp:		driver handle
78*4882a593Smuzhiyun  * @from:	source FP index
79*4882a593Smuzhiyun  * @to:		destination FP index
80*4882a593Smuzhiyun  *
81*4882a593Smuzhiyun  * Makes sure the contents of bp->fp[to].napi are kept
82*4882a593Smuzhiyun  * intact. This is done by first copying the napi struct from
83*4882a593Smuzhiyun  * the target to the source, and then mem copying the entire
84*4882a593Smuzhiyun  * source onto the target. Update txdata pointers and related
85*4882a593Smuzhiyun  * content.
86*4882a593Smuzhiyun  */
87*4882a593Smuzhiyun static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
88*4882a593Smuzhiyun {
89*4882a593Smuzhiyun 	struct bnx2x_fastpath *from_fp = &bp->fp[from];
90*4882a593Smuzhiyun 	struct bnx2x_fastpath *to_fp = &bp->fp[to];
91*4882a593Smuzhiyun 	struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
92*4882a593Smuzhiyun 	struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
93*4882a593Smuzhiyun 	struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
94*4882a593Smuzhiyun 	struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
95*4882a593Smuzhiyun 	int old_max_eth_txqs, new_max_eth_txqs;
96*4882a593Smuzhiyun 	int old_txdata_index = 0, new_txdata_index = 0;
97*4882a593Smuzhiyun 	struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
98*4882a593Smuzhiyun 
99*4882a593Smuzhiyun 	/* Copy the NAPI object as it has been already initialized */
100*4882a593Smuzhiyun 	from_fp->napi = to_fp->napi;
101*4882a593Smuzhiyun 
102*4882a593Smuzhiyun 	/* Move bnx2x_fastpath contents */
103*4882a593Smuzhiyun 	memcpy(to_fp, from_fp, sizeof(*to_fp));
104*4882a593Smuzhiyun 	to_fp->index = to;
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 	/* Retain the tpa_info of the original `to' version as we don't want
107*4882a593Smuzhiyun 	 * 2 FPs to contain the same tpa_info pointer.
108*4882a593Smuzhiyun 	 */
109*4882a593Smuzhiyun 	to_fp->tpa_info = old_tpa_info;
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun 	/* move sp_objs contents as well, as their indices match fp ones */
112*4882a593Smuzhiyun 	memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun 	/* move fp_stats contents as well, as their indices match fp ones */
115*4882a593Smuzhiyun 	memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
116*4882a593Smuzhiyun 
117*4882a593Smuzhiyun 	/* Update txdata pointers in fp and move txdata content accordingly:
118*4882a593Smuzhiyun 	 * Each fp consumes 'max_cos' txdata structures, so the index should be
119*4882a593Smuzhiyun 	 * decremented by max_cos x delta.
120*4882a593Smuzhiyun 	 */
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun 	old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
123*4882a593Smuzhiyun 	new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
124*4882a593Smuzhiyun 				(bp)->max_cos;
125*4882a593Smuzhiyun 	if (from == FCOE_IDX(bp)) {
126*4882a593Smuzhiyun 		old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
127*4882a593Smuzhiyun 		new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
128*4882a593Smuzhiyun 	}
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	memcpy(&bp->bnx2x_txq[new_txdata_index],
131*4882a593Smuzhiyun 	       &bp->bnx2x_txq[old_txdata_index],
132*4882a593Smuzhiyun 	       sizeof(struct bnx2x_fp_txdata));
133*4882a593Smuzhiyun 	to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
134*4882a593Smuzhiyun }
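
/* Editor's note (illustrative sketch, not part of the driver): the key trick in
 * bnx2x_move_fp() is that the destination slot's NAPI context is already
 * registered and must survive the move, so the destination's napi is copied
 * into the source first and only then is the whole fastpath memcpy()ed over
 * the destination. The toy types below (struct ex_ctx / struct ex_slot) are
 * invented to show the same pattern in isolation.
 */
#if 0	/* standalone sketch, intentionally compiled out */
struct ex_ctx { int registered_id; };
struct ex_slot { struct ex_ctx ctx; int payload[4]; int index; };

static void example_move_slot(struct ex_slot *slots, int from, int to)
{
	struct ex_slot *src = &slots[from], *dst = &slots[to];

	src->ctx = dst->ctx;		/* preserve dst's live context */
	memcpy(dst, src, sizeof(*dst));	/* bulk-move everything else */
	dst->index = to;		/* fix up the slot's own index */
}
#endif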
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun /**
137*4882a593Smuzhiyun  * bnx2x_fill_fw_str - Fill buffer with FW version string.
138*4882a593Smuzhiyun  *
139*4882a593Smuzhiyun  * @bp:        driver handle
140*4882a593Smuzhiyun  * @buf:       character buffer to fill with the fw name
141*4882a593Smuzhiyun  * @buf_len:   length of the above buffer
142*4882a593Smuzhiyun  *
143*4882a593Smuzhiyun  */
144*4882a593Smuzhiyun void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
145*4882a593Smuzhiyun {
146*4882a593Smuzhiyun 	if (IS_PF(bp)) {
147*4882a593Smuzhiyun 		u8 phy_fw_ver[PHY_FW_VER_LEN];
148*4882a593Smuzhiyun 
149*4882a593Smuzhiyun 		phy_fw_ver[0] = '\0';
150*4882a593Smuzhiyun 		bnx2x_get_ext_phy_fw_version(&bp->link_params,
151*4882a593Smuzhiyun 					     phy_fw_ver, PHY_FW_VER_LEN);
152*4882a593Smuzhiyun 		strlcpy(buf, bp->fw_ver, buf_len);
153*4882a593Smuzhiyun 		snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
154*4882a593Smuzhiyun 			 "bc %d.%d.%d%s%s",
155*4882a593Smuzhiyun 			 (bp->common.bc_ver & 0xff0000) >> 16,
156*4882a593Smuzhiyun 			 (bp->common.bc_ver & 0xff00) >> 8,
157*4882a593Smuzhiyun 			 (bp->common.bc_ver & 0xff),
158*4882a593Smuzhiyun 			 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
159*4882a593Smuzhiyun 	} else {
160*4882a593Smuzhiyun 		bnx2x_vf_fill_fw_str(bp, buf, buf_len);
161*4882a593Smuzhiyun 	}
162*4882a593Smuzhiyun }
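
/* Editor's note (illustrative sketch, not part of the driver): judging from the
 * shifts above, bp->common.bc_ver appears to pack the bootcode version as three
 * bytes - major in bits 23:16, minor in bits 15:8, sub-version in bits 7:0 - so
 * a value of 0x070d04, for instance, would print as "bc 7.13.4".
 */
#if 0	/* standalone sketch, intentionally compiled out */
static void example_decode_bc_ver(u32 bc_ver, u8 *major, u8 *minor, u8 *sub)
{
	*major = (bc_ver & 0xff0000) >> 16;	/* 0x07 -> 7  */
	*minor = (bc_ver & 0x00ff00) >> 8;	/* 0x0d -> 13 */
	*sub   =  bc_ver & 0x0000ff;		/* 0x04 -> 4  */
}
#endif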
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun /**
165*4882a593Smuzhiyun  * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
166*4882a593Smuzhiyun  *
167*4882a593Smuzhiyun  * @bp:	driver handle
168*4882a593Smuzhiyun  * @delta:	number of eth queues which were not allocated
169*4882a593Smuzhiyun  */
170*4882a593Smuzhiyun static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
171*4882a593Smuzhiyun {
172*4882a593Smuzhiyun 	int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
173*4882a593Smuzhiyun 
174*4882a593Smuzhiyun 	/* Queue pointer cannot be re-set on an fp-basis, as moving pointer
175*4882a593Smuzhiyun 	 * backward along the array could cause memory to be overwritten
176*4882a593Smuzhiyun 	 */
177*4882a593Smuzhiyun 	for (cos = 1; cos < bp->max_cos; cos++) {
178*4882a593Smuzhiyun 		for (i = 0; i < old_eth_num - delta; i++) {
179*4882a593Smuzhiyun 			struct bnx2x_fastpath *fp = &bp->fp[i];
180*4882a593Smuzhiyun 			int new_idx = cos * (old_eth_num - delta) + i;
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 			memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
183*4882a593Smuzhiyun 			       sizeof(struct bnx2x_fp_txdata));
184*4882a593Smuzhiyun 			fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
185*4882a593Smuzhiyun 		}
186*4882a593Smuzhiyun 	}
187*4882a593Smuzhiyun }
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
190*4882a593Smuzhiyun 
191*4882a593Smuzhiyun /* free skb in the packet ring at pos idx
192*4882a593Smuzhiyun  * return idx of last bd freed
193*4882a593Smuzhiyun  */
194*4882a593Smuzhiyun static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
195*4882a593Smuzhiyun 			     u16 idx, unsigned int *pkts_compl,
196*4882a593Smuzhiyun 			     unsigned int *bytes_compl)
197*4882a593Smuzhiyun {
198*4882a593Smuzhiyun 	struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
199*4882a593Smuzhiyun 	struct eth_tx_start_bd *tx_start_bd;
200*4882a593Smuzhiyun 	struct eth_tx_bd *tx_data_bd;
201*4882a593Smuzhiyun 	struct sk_buff *skb = tx_buf->skb;
202*4882a593Smuzhiyun 	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
203*4882a593Smuzhiyun 	int nbd;
204*4882a593Smuzhiyun 	u16 split_bd_len = 0;
205*4882a593Smuzhiyun 
206*4882a593Smuzhiyun 	/* prefetch skb end pointer to speedup dev_kfree_skb() */
207*4882a593Smuzhiyun 	prefetch(&skb->end);
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d  buff @(%p)->skb %p\n",
210*4882a593Smuzhiyun 	   txdata->txq_index, idx, tx_buf, skb);
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
215*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
216*4882a593Smuzhiyun 	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
217*4882a593Smuzhiyun 		BNX2X_ERR("BAD nbd!\n");
218*4882a593Smuzhiyun 		bnx2x_panic();
219*4882a593Smuzhiyun 	}
220*4882a593Smuzhiyun #endif
221*4882a593Smuzhiyun 	new_cons = nbd + tx_buf->first_bd;
222*4882a593Smuzhiyun 
223*4882a593Smuzhiyun 	/* Get the next bd */
224*4882a593Smuzhiyun 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
225*4882a593Smuzhiyun 
226*4882a593Smuzhiyun 	/* Skip a parse bd... */
227*4882a593Smuzhiyun 	--nbd;
228*4882a593Smuzhiyun 	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
229*4882a593Smuzhiyun 
230*4882a593Smuzhiyun 	if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
231*4882a593Smuzhiyun 		/* Skip second parse bd... */
232*4882a593Smuzhiyun 		--nbd;
233*4882a593Smuzhiyun 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
234*4882a593Smuzhiyun 	}
235*4882a593Smuzhiyun 
236*4882a593Smuzhiyun 	/* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
237*4882a593Smuzhiyun 	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
238*4882a593Smuzhiyun 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
239*4882a593Smuzhiyun 		split_bd_len = BD_UNMAP_LEN(tx_data_bd);
240*4882a593Smuzhiyun 		--nbd;
241*4882a593Smuzhiyun 		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
242*4882a593Smuzhiyun 	}
243*4882a593Smuzhiyun 
244*4882a593Smuzhiyun 	/* unmap first bd */
245*4882a593Smuzhiyun 	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
246*4882a593Smuzhiyun 			 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
247*4882a593Smuzhiyun 			 DMA_TO_DEVICE);
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun 	/* now free frags */
250*4882a593Smuzhiyun 	while (nbd > 0) {
251*4882a593Smuzhiyun 
252*4882a593Smuzhiyun 		tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
253*4882a593Smuzhiyun 		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
254*4882a593Smuzhiyun 			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
255*4882a593Smuzhiyun 		if (--nbd)
256*4882a593Smuzhiyun 			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
257*4882a593Smuzhiyun 	}
258*4882a593Smuzhiyun 
259*4882a593Smuzhiyun 	/* release skb */
260*4882a593Smuzhiyun 	WARN_ON(!skb);
261*4882a593Smuzhiyun 	if (likely(skb)) {
262*4882a593Smuzhiyun 		(*pkts_compl)++;
263*4882a593Smuzhiyun 		(*bytes_compl) += skb->len;
264*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
265*4882a593Smuzhiyun 	}
266*4882a593Smuzhiyun 
267*4882a593Smuzhiyun 	tx_buf->first_bd = 0;
268*4882a593Smuzhiyun 	tx_buf->skb = NULL;
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun 	return new_cons;
271*4882a593Smuzhiyun }
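
/* Editor's note (illustrative sketch, not part of the driver): on the TX ring a
 * packet occupies a start BD, one parse BD (two when BNX2X_HAS_SECOND_PBD is
 * set), an optional extra data BD when a TSO header/payload split was done, and
 * one data BD per page fragment. The completion walk above simply unwinds that
 * layout: it unmaps the start BD (folding in the split BD's length) and then
 * unmaps the remaining frag BDs one by one. The helper below restates the
 * accounting; its parameter names are invented.
 */
#if 0	/* standalone sketch, intentionally compiled out */
static unsigned int example_tx_bds_per_pkt(bool has_second_pbd, bool tso_split,
					   unsigned int nr_frags)
{
	unsigned int nbd = 1;		/* start BD */

	nbd += 1;			/* first parse BD */
	if (has_second_pbd)
		nbd += 1;		/* second parse BD */
	if (tso_split)
		nbd += 1;		/* TSO header/data split BD */
	return nbd + nr_frags;		/* one data BD per SKB fragment */
}
#endif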
272*4882a593Smuzhiyun 
273*4882a593Smuzhiyun int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
274*4882a593Smuzhiyun {
275*4882a593Smuzhiyun 	struct netdev_queue *txq;
276*4882a593Smuzhiyun 	u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
277*4882a593Smuzhiyun 	unsigned int pkts_compl = 0, bytes_compl = 0;
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
280*4882a593Smuzhiyun 	if (unlikely(bp->panic))
281*4882a593Smuzhiyun 		return -1;
282*4882a593Smuzhiyun #endif
283*4882a593Smuzhiyun 
284*4882a593Smuzhiyun 	txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
285*4882a593Smuzhiyun 	hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
286*4882a593Smuzhiyun 	sw_cons = txdata->tx_pkt_cons;
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun 	/* Ensure subsequent loads occur after hw_cons */
289*4882a593Smuzhiyun 	smp_rmb();
290*4882a593Smuzhiyun 
291*4882a593Smuzhiyun 	while (sw_cons != hw_cons) {
292*4882a593Smuzhiyun 		u16 pkt_cons;
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun 		pkt_cons = TX_BD(sw_cons);
295*4882a593Smuzhiyun 
296*4882a593Smuzhiyun 		DP(NETIF_MSG_TX_DONE,
297*4882a593Smuzhiyun 		   "queue[%d]: hw_cons %u  sw_cons %u  pkt_cons %u\n",
298*4882a593Smuzhiyun 		   txdata->txq_index, hw_cons, sw_cons, pkt_cons);
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun 		bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
301*4882a593Smuzhiyun 					    &pkts_compl, &bytes_compl);
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 		sw_cons++;
304*4882a593Smuzhiyun 	}
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	txdata->tx_pkt_cons = sw_cons;
309*4882a593Smuzhiyun 	txdata->tx_bd_cons = bd_cons;
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 	/* Need to make the tx_bd_cons update visible to start_xmit()
312*4882a593Smuzhiyun 	 * before checking for netif_tx_queue_stopped().  Without the
313*4882a593Smuzhiyun 	 * memory barrier, there is a small possibility that
314*4882a593Smuzhiyun 	 * start_xmit() will miss it and cause the queue to be stopped
315*4882a593Smuzhiyun 	 * forever.
316*4882a593Smuzhiyun 	 * On the other hand we need an rmb() here to ensure the proper
317*4882a593Smuzhiyun 	 * ordering of bit testing in the following
318*4882a593Smuzhiyun 	 * netif_tx_queue_stopped(txq) call.
319*4882a593Smuzhiyun 	 */
320*4882a593Smuzhiyun 	smp_mb();
321*4882a593Smuzhiyun 
322*4882a593Smuzhiyun 	if (unlikely(netif_tx_queue_stopped(txq))) {
323*4882a593Smuzhiyun 		/* Taking tx_lock() is needed to prevent re-enabling the queue
324*4882a593Smuzhiyun 		 * while it's empty. This could happen if rx_action() gets
325*4882a593Smuzhiyun 		 * suspended in bnx2x_tx_int() after the condition before
326*4882a593Smuzhiyun 		 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
327*4882a593Smuzhiyun 		 *
328*4882a593Smuzhiyun 		 * stops the queue->sees fresh tx_bd_cons->releases the queue->
329*4882a593Smuzhiyun 		 * sends some packets consuming the whole queue again->
330*4882a593Smuzhiyun 		 * stops the queue
331*4882a593Smuzhiyun 		 */
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 		__netif_tx_lock(txq, smp_processor_id());
334*4882a593Smuzhiyun 
335*4882a593Smuzhiyun 		if ((netif_tx_queue_stopped(txq)) &&
336*4882a593Smuzhiyun 		    (bp->state == BNX2X_STATE_OPEN) &&
337*4882a593Smuzhiyun 		    (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
338*4882a593Smuzhiyun 			netif_tx_wake_queue(txq);
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 		__netif_tx_unlock(txq);
341*4882a593Smuzhiyun 	}
342*4882a593Smuzhiyun 	return 0;
343*4882a593Smuzhiyun }
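
/* Editor's note (illustrative sketch, not part of the driver): the smp_mb()
 * above is one half of the classic stop/wake pairing between the transmit and
 * completion paths. Each side publishes its ring update first, issues a full
 * barrier, and only then tests the other side's state, so at least one of them
 * is guaranteed to see the other's update and the queue can never stay stopped
 * forever. A condensed, generic version of the pairing (the "wake_thresh"
 * parameter stands in for MAX_DESC_PER_TX_PKT):
 */
#if 0	/* standalone sketch, intentionally compiled out */
/* completion side: publish the consumer index, barrier, then test and wake */
static void example_tx_complete(struct netdev_queue *txq, unsigned int *cons,
				unsigned int done, const unsigned int *avail,
				unsigned int wake_thresh)
{
	*cons += done;			/* publish the new consumer index */
	smp_mb();			/* pairs with the barrier in xmit */
	if (netif_tx_queue_stopped(txq) && *avail >= wake_thresh)
		netif_tx_wake_queue(txq);
}

/* transmit side: stop the queue, barrier, then re-check before giving up */
static bool example_xmit_maybe_stop(struct netdev_queue *txq,
				    const unsigned int *avail,
				    unsigned int wake_thresh)
{
	if (*avail >= wake_thresh)
		return false;			/* enough room, keep going */
	netif_tx_stop_queue(txq);
	smp_mb();				/* pairs with completion side */
	if (*avail >= wake_thresh) {		/* re-read after the barrier */
		netif_tx_wake_queue(txq);
		return false;
	}
	return true;				/* really out of descriptors */
}
#endif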
344*4882a593Smuzhiyun 
345*4882a593Smuzhiyun static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
346*4882a593Smuzhiyun 					     u16 idx)
347*4882a593Smuzhiyun {
348*4882a593Smuzhiyun 	u16 last_max = fp->last_max_sge;
349*4882a593Smuzhiyun 
350*4882a593Smuzhiyun 	if (SUB_S16(idx, last_max) > 0)
351*4882a593Smuzhiyun 		fp->last_max_sge = idx;
352*4882a593Smuzhiyun }
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
355*4882a593Smuzhiyun 					 u16 sge_len,
356*4882a593Smuzhiyun 					 struct eth_end_agg_rx_cqe *cqe)
357*4882a593Smuzhiyun {
358*4882a593Smuzhiyun 	struct bnx2x *bp = fp->bp;
359*4882a593Smuzhiyun 	u16 last_max, last_elem, first_elem;
360*4882a593Smuzhiyun 	u16 delta = 0;
361*4882a593Smuzhiyun 	u16 i;
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun 	if (!sge_len)
364*4882a593Smuzhiyun 		return;
365*4882a593Smuzhiyun 
366*4882a593Smuzhiyun 	/* First mark all used pages */
367*4882a593Smuzhiyun 	for (i = 0; i < sge_len; i++)
368*4882a593Smuzhiyun 		BIT_VEC64_CLEAR_BIT(fp->sge_mask,
369*4882a593Smuzhiyun 			RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun 	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
372*4882a593Smuzhiyun 	   sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 	/* Here we assume that the last SGE index is the biggest */
375*4882a593Smuzhiyun 	prefetch((void *)(fp->sge_mask));
376*4882a593Smuzhiyun 	bnx2x_update_last_max_sge(fp,
377*4882a593Smuzhiyun 		le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
378*4882a593Smuzhiyun 
379*4882a593Smuzhiyun 	last_max = RX_SGE(fp->last_max_sge);
380*4882a593Smuzhiyun 	last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
381*4882a593Smuzhiyun 	first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
382*4882a593Smuzhiyun 
383*4882a593Smuzhiyun 	/* If ring is not full */
384*4882a593Smuzhiyun 	if (last_elem + 1 != first_elem)
385*4882a593Smuzhiyun 		last_elem++;
386*4882a593Smuzhiyun 
387*4882a593Smuzhiyun 	/* Now update the prod */
388*4882a593Smuzhiyun 	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
389*4882a593Smuzhiyun 		if (likely(fp->sge_mask[i]))
390*4882a593Smuzhiyun 			break;
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 		fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
393*4882a593Smuzhiyun 		delta += BIT_VEC64_ELEM_SZ;
394*4882a593Smuzhiyun 	}
395*4882a593Smuzhiyun 
396*4882a593Smuzhiyun 	if (delta > 0) {
397*4882a593Smuzhiyun 		fp->rx_sge_prod += delta;
398*4882a593Smuzhiyun 		/* clear page-end entries */
399*4882a593Smuzhiyun 		bnx2x_clear_sge_mask_next_elems(fp);
400*4882a593Smuzhiyun 	}
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun 	DP(NETIF_MSG_RX_STATUS,
403*4882a593Smuzhiyun 	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
404*4882a593Smuzhiyun 	   fp->last_max_sge, fp->rx_sge_prod);
405*4882a593Smuzhiyun }
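
/* Editor's note (illustrative sketch, not part of the driver): fp->sge_mask is
 * a bit vector with one bit per SGE entry, grouped into 64-bit words. Completed
 * entries get their bit cleared above; the producer is then advanced only
 * across words that have gone completely to zero, and each such word is
 * re-armed to all-ones so its entries can be reused. The helper below shows the
 * "advance over empty 64-bit words" step on a plain u64 array (without the
 * ring wrap-around handled by NEXT_SGE_MASK_ELEM()).
 */
#if 0	/* standalone sketch, intentionally compiled out */
static unsigned int example_advance_sge_prod(u64 *mask, unsigned int first_word,
					     unsigned int nr_words)
{
	unsigned int advanced = 0, i;

	for (i = first_word; i < nr_words; i++) {
		if (mask[i])		/* word still has outstanding SGEs */
			break;
		mask[i] = ~0ULL;	/* re-arm the word for reuse */
		advanced += 64;		/* producer moves one full word */
	}
	return advanced;
}
#endif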
406*4882a593Smuzhiyun 
407*4882a593Smuzhiyun /* Get Toeplitz hash value in the skb using the value from the
408*4882a593Smuzhiyun  * CQE (calculated by HW).
409*4882a593Smuzhiyun  */
410*4882a593Smuzhiyun static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
411*4882a593Smuzhiyun 			    const struct eth_fast_path_rx_cqe *cqe,
412*4882a593Smuzhiyun 			    enum pkt_hash_types *rxhash_type)
413*4882a593Smuzhiyun {
414*4882a593Smuzhiyun 	/* Get Toeplitz hash from CQE */
415*4882a593Smuzhiyun 	if ((bp->dev->features & NETIF_F_RXHASH) &&
416*4882a593Smuzhiyun 	    (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
417*4882a593Smuzhiyun 		enum eth_rss_hash_type htype;
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun 		htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
420*4882a593Smuzhiyun 		*rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
421*4882a593Smuzhiyun 				(htype == TCP_IPV6_HASH_TYPE)) ?
422*4882a593Smuzhiyun 			       PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
423*4882a593Smuzhiyun 
424*4882a593Smuzhiyun 		return le32_to_cpu(cqe->rss_hash_result);
425*4882a593Smuzhiyun 	}
426*4882a593Smuzhiyun 	*rxhash_type = PKT_HASH_TYPE_NONE;
427*4882a593Smuzhiyun 	return 0;
428*4882a593Smuzhiyun }
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
431*4882a593Smuzhiyun 			    u16 cons, u16 prod,
432*4882a593Smuzhiyun 			    struct eth_fast_path_rx_cqe *cqe)
433*4882a593Smuzhiyun {
434*4882a593Smuzhiyun 	struct bnx2x *bp = fp->bp;
435*4882a593Smuzhiyun 	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
436*4882a593Smuzhiyun 	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
437*4882a593Smuzhiyun 	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
438*4882a593Smuzhiyun 	dma_addr_t mapping;
439*4882a593Smuzhiyun 	struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
440*4882a593Smuzhiyun 	struct sw_rx_bd *first_buf = &tpa_info->first_buf;
441*4882a593Smuzhiyun 
442*4882a593Smuzhiyun 	/* print error if current state != stop */
443*4882a593Smuzhiyun 	if (tpa_info->tpa_state != BNX2X_TPA_STOP)
444*4882a593Smuzhiyun 		BNX2X_ERR("start of bin not in stop [%d]\n", queue);
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 	/* Try to map an empty data buffer from the aggregation info  */
447*4882a593Smuzhiyun 	mapping = dma_map_single(&bp->pdev->dev,
448*4882a593Smuzhiyun 				 first_buf->data + NET_SKB_PAD,
449*4882a593Smuzhiyun 				 fp->rx_buf_size, DMA_FROM_DEVICE);
450*4882a593Smuzhiyun 	/*
451*4882a593Smuzhiyun 	 *  ...if it fails - move the skb from the consumer to the producer
452*4882a593Smuzhiyun 	 *  and set the current aggregation state as ERROR to drop it
453*4882a593Smuzhiyun 	 *  when TPA_STOP arrives.
454*4882a593Smuzhiyun 	 */
455*4882a593Smuzhiyun 
456*4882a593Smuzhiyun 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
457*4882a593Smuzhiyun 		/* Move the BD from the consumer to the producer */
458*4882a593Smuzhiyun 		bnx2x_reuse_rx_data(fp, cons, prod);
459*4882a593Smuzhiyun 		tpa_info->tpa_state = BNX2X_TPA_ERROR;
460*4882a593Smuzhiyun 		return;
461*4882a593Smuzhiyun 	}
462*4882a593Smuzhiyun 
463*4882a593Smuzhiyun 	/* move empty data from pool to prod */
464*4882a593Smuzhiyun 	prod_rx_buf->data = first_buf->data;
465*4882a593Smuzhiyun 	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
466*4882a593Smuzhiyun 	/* point prod_bd to new data */
467*4882a593Smuzhiyun 	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
468*4882a593Smuzhiyun 	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
469*4882a593Smuzhiyun 
470*4882a593Smuzhiyun 	/* move partial skb from cons to pool (don't unmap yet) */
471*4882a593Smuzhiyun 	*first_buf = *cons_rx_buf;
472*4882a593Smuzhiyun 
473*4882a593Smuzhiyun 	/* mark bin state as START */
474*4882a593Smuzhiyun 	tpa_info->parsing_flags =
475*4882a593Smuzhiyun 		le16_to_cpu(cqe->pars_flags.flags);
476*4882a593Smuzhiyun 	tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
477*4882a593Smuzhiyun 	tpa_info->tpa_state = BNX2X_TPA_START;
478*4882a593Smuzhiyun 	tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
479*4882a593Smuzhiyun 	tpa_info->placement_offset = cqe->placement_offset;
480*4882a593Smuzhiyun 	tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
481*4882a593Smuzhiyun 	if (fp->mode == TPA_MODE_GRO) {
482*4882a593Smuzhiyun 		u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
483*4882a593Smuzhiyun 		tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
484*4882a593Smuzhiyun 		tpa_info->gro_size = gro_size;
485*4882a593Smuzhiyun 	}
486*4882a593Smuzhiyun 
487*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
488*4882a593Smuzhiyun 	fp->tpa_queue_used |= (1 << queue);
489*4882a593Smuzhiyun 	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
490*4882a593Smuzhiyun 	   fp->tpa_queue_used);
491*4882a593Smuzhiyun #endif
492*4882a593Smuzhiyun }
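
/* Editor's note (illustrative sketch, not part of the driver): each aggregation
 * queue behaves like a tiny state machine. A TPA_START CQE moves it from STOP
 * to START - or to ERROR when the replacement buffer cannot be DMA-mapped, as
 * handled above - and the matching TPA_END moves it back to STOP, where an
 * ERROR state means the whole aggregate is dropped. A bare restatement:
 */
#if 0	/* standalone sketch, intentionally compiled out */
enum example_tpa_state { EX_TPA_STOP, EX_TPA_START, EX_TPA_ERROR };

static enum example_tpa_state example_on_tpa_start(bool map_failed)
{
	return map_failed ? EX_TPA_ERROR : EX_TPA_START;
}

static bool example_on_tpa_end(enum example_tpa_state state) /* true = drop */
{
	return state == EX_TPA_ERROR;
}
#endif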
493*4882a593Smuzhiyun 
494*4882a593Smuzhiyun /* Timestamp option length allowed for TPA aggregation:
495*4882a593Smuzhiyun  *
496*4882a593Smuzhiyun  *		nop nop kind length echo val
497*4882a593Smuzhiyun  */
498*4882a593Smuzhiyun #define TPA_TSTAMP_OPT_LEN	12
499*4882a593Smuzhiyun /**
500*4882a593Smuzhiyun  * bnx2x_set_gro_params - compute GRO values
501*4882a593Smuzhiyun  *
502*4882a593Smuzhiyun  * @skb:		packet skb
503*4882a593Smuzhiyun  * @parsing_flags:	parsing flags from the START CQE
504*4882a593Smuzhiyun  * @len_on_bd:		total length of the first packet for the
505*4882a593Smuzhiyun  *			aggregation.
506*4882a593Smuzhiyun  * @pkt_len:		length of all segments
507*4882a593Smuzhiyun  * @num_of_coalesced_segs: count of segments
508*4882a593Smuzhiyun  *
509*4882a593Smuzhiyun  * Approximate value of the MSS for this aggregation calculated using
510*4882a593Smuzhiyun  * the first packet of it.
511*4882a593Smuzhiyun  * Compute number of aggregated segments, and gso_type.
512*4882a593Smuzhiyun  */
513*4882a593Smuzhiyun static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
514*4882a593Smuzhiyun 				 u16 len_on_bd, unsigned int pkt_len,
515*4882a593Smuzhiyun 				 u16 num_of_coalesced_segs)
516*4882a593Smuzhiyun {
517*4882a593Smuzhiyun 	/* TPA aggregation won't have either IP options or TCP options
518*4882a593Smuzhiyun 	 * other than timestamp or IPv6 extension headers.
519*4882a593Smuzhiyun 	 */
520*4882a593Smuzhiyun 	u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
521*4882a593Smuzhiyun 
522*4882a593Smuzhiyun 	if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
523*4882a593Smuzhiyun 	    PRS_FLAG_OVERETH_IPV6) {
524*4882a593Smuzhiyun 		hdrs_len += sizeof(struct ipv6hdr);
525*4882a593Smuzhiyun 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
526*4882a593Smuzhiyun 	} else {
527*4882a593Smuzhiyun 		hdrs_len += sizeof(struct iphdr);
528*4882a593Smuzhiyun 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
529*4882a593Smuzhiyun 	}
530*4882a593Smuzhiyun 
531*4882a593Smuzhiyun 	/* Check if there was a TCP timestamp; if there is, it will
532*4882a593Smuzhiyun 	 * always be 12 bytes long: nop nop kind length echo val.
533*4882a593Smuzhiyun 	 *
534*4882a593Smuzhiyun 	 * Otherwise FW would close the aggregation.
535*4882a593Smuzhiyun 	 */
536*4882a593Smuzhiyun 	if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
537*4882a593Smuzhiyun 		hdrs_len += TPA_TSTAMP_OPT_LEN;
538*4882a593Smuzhiyun 
539*4882a593Smuzhiyun 	skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
540*4882a593Smuzhiyun 
541*4882a593Smuzhiyun 	/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
542*4882a593Smuzhiyun 	 * to skb_shinfo(skb)->gso_segs
543*4882a593Smuzhiyun 	 */
544*4882a593Smuzhiyun 	NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
545*4882a593Smuzhiyun }
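
/* Editor's note (illustrative sketch, not part of the driver): the MSS estimate
 * above is simply "bytes on the first BD minus the headers left in place". For
 * a standard 1514-byte IPv4 first frame that is 1514 - (14 + 20 + 20) = 1460,
 * or 1448 when the 12-byte TCP timestamp option is present; the 40-byte IPv6
 * header gives 1440/1428 instead.
 */
#if 0	/* standalone sketch, intentionally compiled out */
static u16 example_tpa_mss(u16 len_on_bd, bool ipv6, bool has_tstamp)
{
	u16 hdrs = ETH_HLEN + sizeof(struct tcphdr);	/* 14 + 20 */

	hdrs += ipv6 ? sizeof(struct ipv6hdr)		/* 40 */
		     : sizeof(struct iphdr);		/* 20 */
	if (has_tstamp)
		hdrs += TPA_TSTAMP_OPT_LEN;		/* nop nop kind len echo val */
	return len_on_bd - hdrs;
}
#endif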
546*4882a593Smuzhiyun 
547*4882a593Smuzhiyun static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
548*4882a593Smuzhiyun 			      u16 index, gfp_t gfp_mask)
549*4882a593Smuzhiyun {
550*4882a593Smuzhiyun 	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
551*4882a593Smuzhiyun 	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
552*4882a593Smuzhiyun 	struct bnx2x_alloc_pool *pool = &fp->page_pool;
553*4882a593Smuzhiyun 	dma_addr_t mapping;
554*4882a593Smuzhiyun 
555*4882a593Smuzhiyun 	if (!pool->page) {
556*4882a593Smuzhiyun 		pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
557*4882a593Smuzhiyun 		if (unlikely(!pool->page))
558*4882a593Smuzhiyun 			return -ENOMEM;
559*4882a593Smuzhiyun 
560*4882a593Smuzhiyun 		pool->offset = 0;
561*4882a593Smuzhiyun 	}
562*4882a593Smuzhiyun 
563*4882a593Smuzhiyun 	mapping = dma_map_page(&bp->pdev->dev, pool->page,
564*4882a593Smuzhiyun 			       pool->offset, SGE_PAGE_SIZE, DMA_FROM_DEVICE);
565*4882a593Smuzhiyun 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
566*4882a593Smuzhiyun 		BNX2X_ERR("Can't map sge\n");
567*4882a593Smuzhiyun 		return -ENOMEM;
568*4882a593Smuzhiyun 	}
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun 	sw_buf->page = pool->page;
571*4882a593Smuzhiyun 	sw_buf->offset = pool->offset;
572*4882a593Smuzhiyun 
573*4882a593Smuzhiyun 	dma_unmap_addr_set(sw_buf, mapping, mapping);
574*4882a593Smuzhiyun 
575*4882a593Smuzhiyun 	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
576*4882a593Smuzhiyun 	sge->addr_lo = cpu_to_le32(U64_LO(mapping));
577*4882a593Smuzhiyun 
578*4882a593Smuzhiyun 	pool->offset += SGE_PAGE_SIZE;
579*4882a593Smuzhiyun 	if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
580*4882a593Smuzhiyun 		get_page(pool->page);
581*4882a593Smuzhiyun 	else
582*4882a593Smuzhiyun 		pool->page = NULL;
583*4882a593Smuzhiyun 	return 0;
584*4882a593Smuzhiyun }
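
/* Editor's note (illustrative sketch, not part of the driver): the pool above
 * carves one page into SGE_PAGE_SIZE slices. Every slice handed out takes its
 * own page reference except the last one, which consumes the pool's original
 * reference and forces a fresh allocation next time; e.g. with 64K kernel pages
 * and 4K slices a single page can back 16 SGEs. The helper below walks the same
 * offset bookkeeping with invented names.
 */
#if 0	/* standalone sketch, intentionally compiled out */
struct example_pool { struct page *page; unsigned int offset; };

static struct page *example_pool_get_slice(struct example_pool *pool,
					   unsigned int slice_sz,
					   unsigned int *offset, gfp_t gfp)
{
	struct page *page;

	if (!pool->page) {
		pool->page = alloc_page(gfp);
		if (!pool->page)
			return NULL;
		pool->offset = 0;
	}

	page = pool->page;
	*offset = pool->offset;
	pool->offset += slice_sz;

	if (PAGE_SIZE - pool->offset >= slice_sz)
		get_page(page);		/* another slice still fits */
	else
		pool->page = NULL;	/* last slice: hand over the last ref */
	return page;
}
#endif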
585*4882a593Smuzhiyun 
586*4882a593Smuzhiyun static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
587*4882a593Smuzhiyun 			       struct bnx2x_agg_info *tpa_info,
588*4882a593Smuzhiyun 			       u16 pages,
589*4882a593Smuzhiyun 			       struct sk_buff *skb,
590*4882a593Smuzhiyun 			       struct eth_end_agg_rx_cqe *cqe,
591*4882a593Smuzhiyun 			       u16 cqe_idx)
592*4882a593Smuzhiyun {
593*4882a593Smuzhiyun 	struct sw_rx_page *rx_pg, old_rx_pg;
594*4882a593Smuzhiyun 	u32 i, frag_len, frag_size;
595*4882a593Smuzhiyun 	int err, j, frag_id = 0;
596*4882a593Smuzhiyun 	u16 len_on_bd = tpa_info->len_on_bd;
597*4882a593Smuzhiyun 	u16 full_page = 0, gro_size = 0;
598*4882a593Smuzhiyun 
599*4882a593Smuzhiyun 	frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
600*4882a593Smuzhiyun 
601*4882a593Smuzhiyun 	if (fp->mode == TPA_MODE_GRO) {
602*4882a593Smuzhiyun 		gro_size = tpa_info->gro_size;
603*4882a593Smuzhiyun 		full_page = tpa_info->full_page;
604*4882a593Smuzhiyun 	}
605*4882a593Smuzhiyun 
606*4882a593Smuzhiyun 	/* This is needed in order to enable forwarding support */
607*4882a593Smuzhiyun 	if (frag_size)
608*4882a593Smuzhiyun 		bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
609*4882a593Smuzhiyun 				     le16_to_cpu(cqe->pkt_len),
610*4882a593Smuzhiyun 				     le16_to_cpu(cqe->num_of_coalesced_segs));
611*4882a593Smuzhiyun 
612*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
613*4882a593Smuzhiyun 	if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
614*4882a593Smuzhiyun 		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
615*4882a593Smuzhiyun 			  pages, cqe_idx);
616*4882a593Smuzhiyun 		BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
617*4882a593Smuzhiyun 		bnx2x_panic();
618*4882a593Smuzhiyun 		return -EINVAL;
619*4882a593Smuzhiyun 	}
620*4882a593Smuzhiyun #endif
621*4882a593Smuzhiyun 
622*4882a593Smuzhiyun 	/* Run through the SGL and compose the fragmented skb */
623*4882a593Smuzhiyun 	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
624*4882a593Smuzhiyun 		u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
625*4882a593Smuzhiyun 
626*4882a593Smuzhiyun 		/* FW gives the indices of the SGE as if the ring is an array
627*4882a593Smuzhiyun 		   (meaning that "next" element will consume 2 indices) */
628*4882a593Smuzhiyun 		if (fp->mode == TPA_MODE_GRO)
629*4882a593Smuzhiyun 			frag_len = min_t(u32, frag_size, (u32)full_page);
630*4882a593Smuzhiyun 		else /* LRO */
631*4882a593Smuzhiyun 			frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
632*4882a593Smuzhiyun 
633*4882a593Smuzhiyun 		rx_pg = &fp->rx_page_ring[sge_idx];
634*4882a593Smuzhiyun 		old_rx_pg = *rx_pg;
635*4882a593Smuzhiyun 
636*4882a593Smuzhiyun 		/* If we fail to allocate a substitute page, we simply stop
637*4882a593Smuzhiyun 		   where we are and drop the whole packet */
638*4882a593Smuzhiyun 		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
639*4882a593Smuzhiyun 		if (unlikely(err)) {
640*4882a593Smuzhiyun 			bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
641*4882a593Smuzhiyun 			return err;
642*4882a593Smuzhiyun 		}
643*4882a593Smuzhiyun 
644*4882a593Smuzhiyun 		dma_unmap_page(&bp->pdev->dev,
645*4882a593Smuzhiyun 			       dma_unmap_addr(&old_rx_pg, mapping),
646*4882a593Smuzhiyun 			       SGE_PAGE_SIZE, DMA_FROM_DEVICE);
647*4882a593Smuzhiyun 		/* Add one frag and update the appropriate fields in the skb */
648*4882a593Smuzhiyun 		if (fp->mode == TPA_MODE_LRO)
649*4882a593Smuzhiyun 			skb_fill_page_desc(skb, j, old_rx_pg.page,
650*4882a593Smuzhiyun 					   old_rx_pg.offset, frag_len);
651*4882a593Smuzhiyun 		else { /* GRO */
652*4882a593Smuzhiyun 			int rem;
653*4882a593Smuzhiyun 			int offset = 0;
654*4882a593Smuzhiyun 			for (rem = frag_len; rem > 0; rem -= gro_size) {
655*4882a593Smuzhiyun 				int len = rem > gro_size ? gro_size : rem;
656*4882a593Smuzhiyun 				skb_fill_page_desc(skb, frag_id++,
657*4882a593Smuzhiyun 						   old_rx_pg.page,
658*4882a593Smuzhiyun 						   old_rx_pg.offset + offset,
659*4882a593Smuzhiyun 						   len);
660*4882a593Smuzhiyun 				if (offset)
661*4882a593Smuzhiyun 					get_page(old_rx_pg.page);
662*4882a593Smuzhiyun 				offset += len;
663*4882a593Smuzhiyun 			}
664*4882a593Smuzhiyun 		}
665*4882a593Smuzhiyun 
666*4882a593Smuzhiyun 		skb->data_len += frag_len;
667*4882a593Smuzhiyun 		skb->truesize += SGE_PAGES;
668*4882a593Smuzhiyun 		skb->len += frag_len;
669*4882a593Smuzhiyun 
670*4882a593Smuzhiyun 		frag_size -= frag_len;
671*4882a593Smuzhiyun 	}
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun 	return 0;
674*4882a593Smuzhiyun }
675*4882a593Smuzhiyun 
676*4882a593Smuzhiyun static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
677*4882a593Smuzhiyun {
678*4882a593Smuzhiyun 	if (fp->rx_frag_size)
679*4882a593Smuzhiyun 		skb_free_frag(data);
680*4882a593Smuzhiyun 	else
681*4882a593Smuzhiyun 		kfree(data);
682*4882a593Smuzhiyun }
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
685*4882a593Smuzhiyun {
686*4882a593Smuzhiyun 	if (fp->rx_frag_size) {
687*4882a593Smuzhiyun 		/* GFP_KERNEL allocations are used only during initialization */
688*4882a593Smuzhiyun 		if (unlikely(gfpflags_allow_blocking(gfp_mask)))
689*4882a593Smuzhiyun 			return (void *)__get_free_page(gfp_mask);
690*4882a593Smuzhiyun 
691*4882a593Smuzhiyun 		return napi_alloc_frag(fp->rx_frag_size);
692*4882a593Smuzhiyun 	}
693*4882a593Smuzhiyun 
694*4882a593Smuzhiyun 	return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
695*4882a593Smuzhiyun }
696*4882a593Smuzhiyun 
697*4882a593Smuzhiyun #ifdef CONFIG_INET
698*4882a593Smuzhiyun static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
699*4882a593Smuzhiyun {
700*4882a593Smuzhiyun 	const struct iphdr *iph = ip_hdr(skb);
701*4882a593Smuzhiyun 	struct tcphdr *th;
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 	skb_set_transport_header(skb, sizeof(struct iphdr));
704*4882a593Smuzhiyun 	th = tcp_hdr(skb);
705*4882a593Smuzhiyun 
706*4882a593Smuzhiyun 	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
707*4882a593Smuzhiyun 				  iph->saddr, iph->daddr, 0);
708*4882a593Smuzhiyun }
709*4882a593Smuzhiyun 
710*4882a593Smuzhiyun static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
711*4882a593Smuzhiyun {
712*4882a593Smuzhiyun 	struct ipv6hdr *iph = ipv6_hdr(skb);
713*4882a593Smuzhiyun 	struct tcphdr *th;
714*4882a593Smuzhiyun 
715*4882a593Smuzhiyun 	skb_set_transport_header(skb, sizeof(struct ipv6hdr));
716*4882a593Smuzhiyun 	th = tcp_hdr(skb);
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun 	th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
719*4882a593Smuzhiyun 				  &iph->saddr, &iph->daddr, 0);
720*4882a593Smuzhiyun }
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
723*4882a593Smuzhiyun 			    void (*gro_func)(struct bnx2x*, struct sk_buff*))
724*4882a593Smuzhiyun {
725*4882a593Smuzhiyun 	skb_reset_network_header(skb);
726*4882a593Smuzhiyun 	gro_func(bp, skb);
727*4882a593Smuzhiyun 	tcp_gro_complete(skb);
728*4882a593Smuzhiyun }
729*4882a593Smuzhiyun #endif
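
/* Editor's note (illustrative sketch, not part of the driver): before handing a
 * firmware-coalesced aggregate to tcp_gro_complete(), the TCP checksum field is
 * seeded with the complement of the pseudo-header checksum (payload sum 0);
 * tcp_gro_complete() is then expected to flip the skb to CHECKSUM_PARTIAL with
 * csum_start/csum_offset at the TCP header, which is what the stack wants for a
 * GSO packet. The fragment below collapses the two helpers above into one IPv4
 * sequence.
 */
#if 0	/* standalone sketch, intentionally compiled out */
static void example_finish_ipv4_aggregate(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	skb_reset_network_header(skb);			/* L2 header already pulled */
	iph = ip_hdr(skb);
	skb_set_transport_header(skb, sizeof(*iph));	/* TPA frames carry no IP options */
	th = tcp_hdr(skb);

	/* pseudo-header seed: saddr, daddr, proto, TCP length, payload sum 0 */
	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	tcp_gro_complete(skb);
}
#endif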
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
732*4882a593Smuzhiyun 			       struct sk_buff *skb)
733*4882a593Smuzhiyun {
734*4882a593Smuzhiyun #ifdef CONFIG_INET
735*4882a593Smuzhiyun 	if (skb_shinfo(skb)->gso_size) {
736*4882a593Smuzhiyun 		switch (be16_to_cpu(skb->protocol)) {
737*4882a593Smuzhiyun 		case ETH_P_IP:
738*4882a593Smuzhiyun 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
739*4882a593Smuzhiyun 			break;
740*4882a593Smuzhiyun 		case ETH_P_IPV6:
741*4882a593Smuzhiyun 			bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
742*4882a593Smuzhiyun 			break;
743*4882a593Smuzhiyun 		default:
744*4882a593Smuzhiyun 			netdev_WARN_ONCE(bp->dev,
745*4882a593Smuzhiyun 					 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
746*4882a593Smuzhiyun 					 be16_to_cpu(skb->protocol));
747*4882a593Smuzhiyun 		}
748*4882a593Smuzhiyun 	}
749*4882a593Smuzhiyun #endif
750*4882a593Smuzhiyun 	skb_record_rx_queue(skb, fp->rx_queue);
751*4882a593Smuzhiyun 	napi_gro_receive(&fp->napi, skb);
752*4882a593Smuzhiyun }
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
755*4882a593Smuzhiyun 			   struct bnx2x_agg_info *tpa_info,
756*4882a593Smuzhiyun 			   u16 pages,
757*4882a593Smuzhiyun 			   struct eth_end_agg_rx_cqe *cqe,
758*4882a593Smuzhiyun 			   u16 cqe_idx)
759*4882a593Smuzhiyun {
760*4882a593Smuzhiyun 	struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
761*4882a593Smuzhiyun 	u8 pad = tpa_info->placement_offset;
762*4882a593Smuzhiyun 	u16 len = tpa_info->len_on_bd;
763*4882a593Smuzhiyun 	struct sk_buff *skb = NULL;
764*4882a593Smuzhiyun 	u8 *new_data, *data = rx_buf->data;
765*4882a593Smuzhiyun 	u8 old_tpa_state = tpa_info->tpa_state;
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun 	tpa_info->tpa_state = BNX2X_TPA_STOP;
768*4882a593Smuzhiyun 
769*4882a593Smuzhiyun 	/* If there was an error during the handling of the TPA_START -
770*4882a593Smuzhiyun 	 * drop this aggregation.
771*4882a593Smuzhiyun 	 */
772*4882a593Smuzhiyun 	if (old_tpa_state == BNX2X_TPA_ERROR)
773*4882a593Smuzhiyun 		goto drop;
774*4882a593Smuzhiyun 
775*4882a593Smuzhiyun 	/* Try to allocate the new data */
776*4882a593Smuzhiyun 	new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
777*4882a593Smuzhiyun 	/* Unmap skb in the pool anyway, as we are going to change
778*4882a593Smuzhiyun 	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
779*4882a593Smuzhiyun 	   fails. */
780*4882a593Smuzhiyun 	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
781*4882a593Smuzhiyun 			 fp->rx_buf_size, DMA_FROM_DEVICE);
782*4882a593Smuzhiyun 	if (likely(new_data))
783*4882a593Smuzhiyun 		skb = build_skb(data, fp->rx_frag_size);
784*4882a593Smuzhiyun 
785*4882a593Smuzhiyun 	if (likely(skb)) {
786*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
787*4882a593Smuzhiyun 		if (pad + len > fp->rx_buf_size) {
788*4882a593Smuzhiyun 			BNX2X_ERR("skb_put is about to fail...  pad %d  len %d  rx_buf_size %d\n",
789*4882a593Smuzhiyun 				  pad, len, fp->rx_buf_size);
790*4882a593Smuzhiyun 			bnx2x_panic();
791*4882a593Smuzhiyun 			bnx2x_frag_free(fp, new_data);
792*4882a593Smuzhiyun 			return;
793*4882a593Smuzhiyun 		}
794*4882a593Smuzhiyun #endif
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 		skb_reserve(skb, pad + NET_SKB_PAD);
797*4882a593Smuzhiyun 		skb_put(skb, len);
798*4882a593Smuzhiyun 		skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun 		skb->protocol = eth_type_trans(skb, bp->dev);
801*4882a593Smuzhiyun 		skb->ip_summed = CHECKSUM_UNNECESSARY;
802*4882a593Smuzhiyun 
803*4882a593Smuzhiyun 		if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
804*4882a593Smuzhiyun 					 skb, cqe, cqe_idx)) {
805*4882a593Smuzhiyun 			if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
806*4882a593Smuzhiyun 				__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
807*4882a593Smuzhiyun 			bnx2x_gro_receive(bp, fp, skb);
808*4882a593Smuzhiyun 		} else {
809*4882a593Smuzhiyun 			DP(NETIF_MSG_RX_STATUS,
810*4882a593Smuzhiyun 			   "Failed to allocate new pages - dropping packet!\n");
811*4882a593Smuzhiyun 			dev_kfree_skb_any(skb);
812*4882a593Smuzhiyun 		}
813*4882a593Smuzhiyun 
814*4882a593Smuzhiyun 		/* put new data in bin */
815*4882a593Smuzhiyun 		rx_buf->data = new_data;
816*4882a593Smuzhiyun 
817*4882a593Smuzhiyun 		return;
818*4882a593Smuzhiyun 	}
819*4882a593Smuzhiyun 	if (new_data)
820*4882a593Smuzhiyun 		bnx2x_frag_free(fp, new_data);
821*4882a593Smuzhiyun drop:
822*4882a593Smuzhiyun 	/* drop the packet and keep the buffer in the bin */
823*4882a593Smuzhiyun 	DP(NETIF_MSG_RX_STATUS,
824*4882a593Smuzhiyun 	   "Failed to allocate or map a new skb - dropping packet!\n");
825*4882a593Smuzhiyun 	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
826*4882a593Smuzhiyun }
827*4882a593Smuzhiyun 
828*4882a593Smuzhiyun static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
829*4882a593Smuzhiyun 			       u16 index, gfp_t gfp_mask)
830*4882a593Smuzhiyun {
831*4882a593Smuzhiyun 	u8 *data;
832*4882a593Smuzhiyun 	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
833*4882a593Smuzhiyun 	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
834*4882a593Smuzhiyun 	dma_addr_t mapping;
835*4882a593Smuzhiyun 
836*4882a593Smuzhiyun 	data = bnx2x_frag_alloc(fp, gfp_mask);
837*4882a593Smuzhiyun 	if (unlikely(data == NULL))
838*4882a593Smuzhiyun 		return -ENOMEM;
839*4882a593Smuzhiyun 
840*4882a593Smuzhiyun 	mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
841*4882a593Smuzhiyun 				 fp->rx_buf_size,
842*4882a593Smuzhiyun 				 DMA_FROM_DEVICE);
843*4882a593Smuzhiyun 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
844*4882a593Smuzhiyun 		bnx2x_frag_free(fp, data);
845*4882a593Smuzhiyun 		BNX2X_ERR("Can't map rx data\n");
846*4882a593Smuzhiyun 		return -ENOMEM;
847*4882a593Smuzhiyun 	}
848*4882a593Smuzhiyun 
849*4882a593Smuzhiyun 	rx_buf->data = data;
850*4882a593Smuzhiyun 	dma_unmap_addr_set(rx_buf, mapping, mapping);
851*4882a593Smuzhiyun 
852*4882a593Smuzhiyun 	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
853*4882a593Smuzhiyun 	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
854*4882a593Smuzhiyun 
855*4882a593Smuzhiyun 	return 0;
856*4882a593Smuzhiyun }
857*4882a593Smuzhiyun 
858*4882a593Smuzhiyun static
859*4882a593Smuzhiyun void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
860*4882a593Smuzhiyun 				 struct bnx2x_fastpath *fp,
861*4882a593Smuzhiyun 				 struct bnx2x_eth_q_stats *qstats)
862*4882a593Smuzhiyun {
863*4882a593Smuzhiyun 	/* Do nothing if no L4 csum validation was done.
864*4882a593Smuzhiyun 	 * We do not check whether IP csum was validated. For IPv4 we assume
865*4882a593Smuzhiyun 	 * that if the card got as far as validating the L4 csum, it also
866*4882a593Smuzhiyun 	 * validated the IP csum. IPv6 has no IP csum.
867*4882a593Smuzhiyun 	 */
868*4882a593Smuzhiyun 	if (cqe->fast_path_cqe.status_flags &
869*4882a593Smuzhiyun 	    ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
870*4882a593Smuzhiyun 		return;
871*4882a593Smuzhiyun 
872*4882a593Smuzhiyun 	/* If L4 validation was done, check if an error was found. */
873*4882a593Smuzhiyun 
874*4882a593Smuzhiyun 	if (cqe->fast_path_cqe.type_error_flags &
875*4882a593Smuzhiyun 	    (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
876*4882a593Smuzhiyun 	     ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
877*4882a593Smuzhiyun 		qstats->hw_csum_err++;
878*4882a593Smuzhiyun 	else
879*4882a593Smuzhiyun 		skb->ip_summed = CHECKSUM_UNNECESSARY;
880*4882a593Smuzhiyun }
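
/* Editor's note (illustrative sketch, not part of the driver): the logic above
 * is a three-way decision - if the NIC skipped L4 validation the skb stays
 * CHECKSUM_NONE and the stack verifies it, if validation ran and an IP/L4 error
 * was flagged only a counter is bumped, and otherwise the skb is marked
 * CHECKSUM_UNNECESSARY. Restated as a small pure function:
 */
#if 0	/* standalone sketch, intentionally compiled out */
enum example_csum_verdict { EX_CSUM_LET_STACK_CHECK, EX_CSUM_HW_ERROR, EX_CSUM_GOOD };

static enum example_csum_verdict example_csum_verdict(bool l4_validated,
						      bool csum_error)
{
	if (!l4_validated)
		return EX_CSUM_LET_STACK_CHECK;	/* leave skb as CHECKSUM_NONE */
	if (csum_error)
		return EX_CSUM_HW_ERROR;	/* bump qstats->hw_csum_err */
	return EX_CSUM_GOOD;			/* CHECKSUM_UNNECESSARY */
}
#endif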
881*4882a593Smuzhiyun 
882*4882a593Smuzhiyun static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
883*4882a593Smuzhiyun {
884*4882a593Smuzhiyun 	struct bnx2x *bp = fp->bp;
885*4882a593Smuzhiyun 	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
886*4882a593Smuzhiyun 	u16 sw_comp_cons, sw_comp_prod;
887*4882a593Smuzhiyun 	int rx_pkt = 0;
888*4882a593Smuzhiyun 	union eth_rx_cqe *cqe;
889*4882a593Smuzhiyun 	struct eth_fast_path_rx_cqe *cqe_fp;
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
892*4882a593Smuzhiyun 	if (unlikely(bp->panic))
893*4882a593Smuzhiyun 		return 0;
894*4882a593Smuzhiyun #endif
895*4882a593Smuzhiyun 	if (budget <= 0)
896*4882a593Smuzhiyun 		return rx_pkt;
897*4882a593Smuzhiyun 
898*4882a593Smuzhiyun 	bd_cons = fp->rx_bd_cons;
899*4882a593Smuzhiyun 	bd_prod = fp->rx_bd_prod;
900*4882a593Smuzhiyun 	bd_prod_fw = bd_prod;
901*4882a593Smuzhiyun 	sw_comp_cons = fp->rx_comp_cons;
902*4882a593Smuzhiyun 	sw_comp_prod = fp->rx_comp_prod;
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun 	comp_ring_cons = RCQ_BD(sw_comp_cons);
905*4882a593Smuzhiyun 	cqe = &fp->rx_comp_ring[comp_ring_cons];
906*4882a593Smuzhiyun 	cqe_fp = &cqe->fast_path_cqe;
907*4882a593Smuzhiyun 
908*4882a593Smuzhiyun 	DP(NETIF_MSG_RX_STATUS,
909*4882a593Smuzhiyun 	   "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
910*4882a593Smuzhiyun 
911*4882a593Smuzhiyun 	while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
912*4882a593Smuzhiyun 		struct sw_rx_bd *rx_buf = NULL;
913*4882a593Smuzhiyun 		struct sk_buff *skb;
914*4882a593Smuzhiyun 		u8 cqe_fp_flags;
915*4882a593Smuzhiyun 		enum eth_rx_cqe_type cqe_fp_type;
916*4882a593Smuzhiyun 		u16 len, pad, queue;
917*4882a593Smuzhiyun 		u8 *data;
918*4882a593Smuzhiyun 		u32 rxhash;
919*4882a593Smuzhiyun 		enum pkt_hash_types rxhash_type;
920*4882a593Smuzhiyun 
921*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
922*4882a593Smuzhiyun 		if (unlikely(bp->panic))
923*4882a593Smuzhiyun 			return 0;
924*4882a593Smuzhiyun #endif
925*4882a593Smuzhiyun 
926*4882a593Smuzhiyun 		bd_prod = RX_BD(bd_prod);
927*4882a593Smuzhiyun 		bd_cons = RX_BD(bd_cons);
928*4882a593Smuzhiyun 
929*4882a593Smuzhiyun 		/* A rmb() is required to ensure that the CQE is not read
930*4882a593Smuzhiyun 		 * before it is written by the adapter DMA.  PCI ordering
931*4882a593Smuzhiyun 		 * rules will make sure the other fields are written before
932*4882a593Smuzhiyun 		 * the marker at the end of struct eth_fast_path_rx_cqe
933*4882a593Smuzhiyun 		 * but without rmb() a weakly ordered processor can process
934*4882a593Smuzhiyun 		 * stale data.  Without the barrier TPA state-machine might
935*4882a593Smuzhiyun 		 * stale data.  Without the barrier the TPA state machine might
936*4882a593Smuzhiyun 		 * enter an inconsistent state and the kernel stack might be
937*4882a593Smuzhiyun 		 * provided with an incorrect packet description - these lead
938*4882a593Smuzhiyun 		 * to various kernel crashes.
939*4882a593Smuzhiyun 		rmb();
940*4882a593Smuzhiyun 
941*4882a593Smuzhiyun 		cqe_fp_flags = cqe_fp->type_error_flags;
942*4882a593Smuzhiyun 		cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
943*4882a593Smuzhiyun 
944*4882a593Smuzhiyun 		DP(NETIF_MSG_RX_STATUS,
945*4882a593Smuzhiyun 		   "CQE type %x  err %x  status %x  queue %x  vlan %x  len %u\n",
946*4882a593Smuzhiyun 		   CQE_TYPE(cqe_fp_flags),
947*4882a593Smuzhiyun 		   cqe_fp_flags, cqe_fp->status_flags,
948*4882a593Smuzhiyun 		   le32_to_cpu(cqe_fp->rss_hash_result),
949*4882a593Smuzhiyun 		   le16_to_cpu(cqe_fp->vlan_tag),
950*4882a593Smuzhiyun 		   le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun 		/* is this a slowpath msg? */
953*4882a593Smuzhiyun 		if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
954*4882a593Smuzhiyun 			bnx2x_sp_event(fp, cqe);
955*4882a593Smuzhiyun 			goto next_cqe;
956*4882a593Smuzhiyun 		}
957*4882a593Smuzhiyun 
958*4882a593Smuzhiyun 		rx_buf = &fp->rx_buf_ring[bd_cons];
959*4882a593Smuzhiyun 		data = rx_buf->data;
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun 		if (!CQE_TYPE_FAST(cqe_fp_type)) {
962*4882a593Smuzhiyun 			struct bnx2x_agg_info *tpa_info;
963*4882a593Smuzhiyun 			u16 frag_size, pages;
964*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
965*4882a593Smuzhiyun 			/* sanity check */
966*4882a593Smuzhiyun 			if (fp->mode == TPA_MODE_DISABLED &&
967*4882a593Smuzhiyun 			    (CQE_TYPE_START(cqe_fp_type) ||
968*4882a593Smuzhiyun 			     CQE_TYPE_STOP(cqe_fp_type)))
969*4882a593Smuzhiyun 				BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
970*4882a593Smuzhiyun 					  CQE_TYPE(cqe_fp_type));
971*4882a593Smuzhiyun #endif
972*4882a593Smuzhiyun 
973*4882a593Smuzhiyun 			if (CQE_TYPE_START(cqe_fp_type)) {
974*4882a593Smuzhiyun 				u16 queue = cqe_fp->queue_index;
975*4882a593Smuzhiyun 				DP(NETIF_MSG_RX_STATUS,
976*4882a593Smuzhiyun 				   "calling tpa_start on queue %d\n",
977*4882a593Smuzhiyun 				   queue);
978*4882a593Smuzhiyun 
979*4882a593Smuzhiyun 				bnx2x_tpa_start(fp, queue,
980*4882a593Smuzhiyun 						bd_cons, bd_prod,
981*4882a593Smuzhiyun 						cqe_fp);
982*4882a593Smuzhiyun 
983*4882a593Smuzhiyun 				goto next_rx;
984*4882a593Smuzhiyun 			}
985*4882a593Smuzhiyun 			queue = cqe->end_agg_cqe.queue_index;
986*4882a593Smuzhiyun 			tpa_info = &fp->tpa_info[queue];
987*4882a593Smuzhiyun 			DP(NETIF_MSG_RX_STATUS,
988*4882a593Smuzhiyun 			   "calling tpa_stop on queue %d\n",
989*4882a593Smuzhiyun 			   queue);
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun 			frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
992*4882a593Smuzhiyun 				    tpa_info->len_on_bd;
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun 			if (fp->mode == TPA_MODE_GRO)
995*4882a593Smuzhiyun 				pages = (frag_size + tpa_info->full_page - 1) /
996*4882a593Smuzhiyun 					 tpa_info->full_page;
997*4882a593Smuzhiyun 			else
998*4882a593Smuzhiyun 				pages = SGE_PAGE_ALIGN(frag_size) >>
999*4882a593Smuzhiyun 					SGE_PAGE_SHIFT;
1000*4882a593Smuzhiyun 
1001*4882a593Smuzhiyun 			bnx2x_tpa_stop(bp, fp, tpa_info, pages,
1002*4882a593Smuzhiyun 				       &cqe->end_agg_cqe, comp_ring_cons);
1003*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
1004*4882a593Smuzhiyun 			if (bp->panic)
1005*4882a593Smuzhiyun 				return 0;
1006*4882a593Smuzhiyun #endif
1007*4882a593Smuzhiyun 
1008*4882a593Smuzhiyun 			bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
1009*4882a593Smuzhiyun 			goto next_cqe;
1010*4882a593Smuzhiyun 		}
1011*4882a593Smuzhiyun 		/* non TPA */
1012*4882a593Smuzhiyun 		len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
1013*4882a593Smuzhiyun 		pad = cqe_fp->placement_offset;
1014*4882a593Smuzhiyun 		dma_sync_single_for_cpu(&bp->pdev->dev,
1015*4882a593Smuzhiyun 					dma_unmap_addr(rx_buf, mapping),
1016*4882a593Smuzhiyun 					pad + RX_COPY_THRESH,
1017*4882a593Smuzhiyun 					DMA_FROM_DEVICE);
1018*4882a593Smuzhiyun 		pad += NET_SKB_PAD;
1019*4882a593Smuzhiyun 		prefetch(data + pad); /* speedup eth_type_trans() */
1020*4882a593Smuzhiyun 		/* is this an error packet? */
1021*4882a593Smuzhiyun 		if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1022*4882a593Smuzhiyun 			DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1023*4882a593Smuzhiyun 			   "ERROR  flags %x  rx packet %u\n",
1024*4882a593Smuzhiyun 			   cqe_fp_flags, sw_comp_cons);
1025*4882a593Smuzhiyun 			bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1026*4882a593Smuzhiyun 			goto reuse_rx;
1027*4882a593Smuzhiyun 		}
1028*4882a593Smuzhiyun 
1029*4882a593Smuzhiyun 		/* Since we don't have a jumbo ring
1030*4882a593Smuzhiyun 		 * copy small packets if mtu > 1500
1031*4882a593Smuzhiyun 		 */
1032*4882a593Smuzhiyun 		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1033*4882a593Smuzhiyun 		    (len <= RX_COPY_THRESH)) {
1034*4882a593Smuzhiyun 			skb = napi_alloc_skb(&fp->napi, len);
1035*4882a593Smuzhiyun 			if (skb == NULL) {
1036*4882a593Smuzhiyun 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1037*4882a593Smuzhiyun 				   "ERROR  packet dropped because of alloc failure\n");
1038*4882a593Smuzhiyun 				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1039*4882a593Smuzhiyun 				goto reuse_rx;
1040*4882a593Smuzhiyun 			}
1041*4882a593Smuzhiyun 			memcpy(skb->data, data + pad, len);
1042*4882a593Smuzhiyun 			bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1043*4882a593Smuzhiyun 		} else {
1044*4882a593Smuzhiyun 			if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1045*4882a593Smuzhiyun 						       GFP_ATOMIC) == 0)) {
1046*4882a593Smuzhiyun 				dma_unmap_single(&bp->pdev->dev,
1047*4882a593Smuzhiyun 						 dma_unmap_addr(rx_buf, mapping),
1048*4882a593Smuzhiyun 						 fp->rx_buf_size,
1049*4882a593Smuzhiyun 						 DMA_FROM_DEVICE);
1050*4882a593Smuzhiyun 				skb = build_skb(data, fp->rx_frag_size);
1051*4882a593Smuzhiyun 				if (unlikely(!skb)) {
1052*4882a593Smuzhiyun 					bnx2x_frag_free(fp, data);
1053*4882a593Smuzhiyun 					bnx2x_fp_qstats(bp, fp)->
1054*4882a593Smuzhiyun 							rx_skb_alloc_failed++;
1055*4882a593Smuzhiyun 					goto next_rx;
1056*4882a593Smuzhiyun 				}
1057*4882a593Smuzhiyun 				skb_reserve(skb, pad);
1058*4882a593Smuzhiyun 			} else {
1059*4882a593Smuzhiyun 				DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1060*4882a593Smuzhiyun 				   "ERROR  packet dropped because of alloc failure\n");
1061*4882a593Smuzhiyun 				bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1062*4882a593Smuzhiyun reuse_rx:
1063*4882a593Smuzhiyun 				bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1064*4882a593Smuzhiyun 				goto next_rx;
1065*4882a593Smuzhiyun 			}
1066*4882a593Smuzhiyun 		}
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 		skb_put(skb, len);
1069*4882a593Smuzhiyun 		skb->protocol = eth_type_trans(skb, bp->dev);
1070*4882a593Smuzhiyun 
1071*4882a593Smuzhiyun 		/* Set Toeplitz hash for a non-LRO skb */
1072*4882a593Smuzhiyun 		rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1073*4882a593Smuzhiyun 		skb_set_hash(skb, rxhash, rxhash_type);
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun 		skb_checksum_none_assert(skb);
1076*4882a593Smuzhiyun 
1077*4882a593Smuzhiyun 		if (bp->dev->features & NETIF_F_RXCSUM)
1078*4882a593Smuzhiyun 			bnx2x_csum_validate(skb, cqe, fp,
1079*4882a593Smuzhiyun 					    bnx2x_fp_qstats(bp, fp));
1080*4882a593Smuzhiyun 
1081*4882a593Smuzhiyun 		skb_record_rx_queue(skb, fp->rx_queue);
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun 		/* Check if this packet was timestamped */
1084*4882a593Smuzhiyun 		if (unlikely(cqe->fast_path_cqe.type_error_flags &
1085*4882a593Smuzhiyun 			     (1 << ETH_FAST_PATH_RX_CQE_PTP_PKT_SHIFT)))
1086*4882a593Smuzhiyun 			bnx2x_set_rx_ts(bp, skb);
1087*4882a593Smuzhiyun 
1088*4882a593Smuzhiyun 		if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1089*4882a593Smuzhiyun 		    PARSING_FLAGS_VLAN)
1090*4882a593Smuzhiyun 			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1091*4882a593Smuzhiyun 					       le16_to_cpu(cqe_fp->vlan_tag));
1092*4882a593Smuzhiyun 
1093*4882a593Smuzhiyun 		napi_gro_receive(&fp->napi, skb);
1094*4882a593Smuzhiyun next_rx:
1095*4882a593Smuzhiyun 		rx_buf->data = NULL;
1096*4882a593Smuzhiyun 
1097*4882a593Smuzhiyun 		bd_cons = NEXT_RX_IDX(bd_cons);
1098*4882a593Smuzhiyun 		bd_prod = NEXT_RX_IDX(bd_prod);
1099*4882a593Smuzhiyun 		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1100*4882a593Smuzhiyun 		rx_pkt++;
1101*4882a593Smuzhiyun next_cqe:
1102*4882a593Smuzhiyun 		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1103*4882a593Smuzhiyun 		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1104*4882a593Smuzhiyun 
1105*4882a593Smuzhiyun 		/* mark CQE as free */
1106*4882a593Smuzhiyun 		BNX2X_SEED_CQE(cqe_fp);
1107*4882a593Smuzhiyun 
1108*4882a593Smuzhiyun 		if (rx_pkt == budget)
1109*4882a593Smuzhiyun 			break;
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 		comp_ring_cons = RCQ_BD(sw_comp_cons);
1112*4882a593Smuzhiyun 		cqe = &fp->rx_comp_ring[comp_ring_cons];
1113*4882a593Smuzhiyun 		cqe_fp = &cqe->fast_path_cqe;
1114*4882a593Smuzhiyun 	} /* while */
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 	fp->rx_bd_cons = bd_cons;
1117*4882a593Smuzhiyun 	fp->rx_bd_prod = bd_prod_fw;
1118*4882a593Smuzhiyun 	fp->rx_comp_cons = sw_comp_cons;
1119*4882a593Smuzhiyun 	fp->rx_comp_prod = sw_comp_prod;
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	/* Update producers */
1122*4882a593Smuzhiyun 	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1123*4882a593Smuzhiyun 			     fp->rx_sge_prod);
1124*4882a593Smuzhiyun 
1125*4882a593Smuzhiyun 	return rx_pkt;
1126*4882a593Smuzhiyun }
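
/* Editor's note - illustrative sketch, not part of the driver and not
 * compiled. It restates the copy-break decision taken in the Rx path above:
 * when the device runs without a jumbo ring (MTU above the standard Ethernet
 * size) and the received frame is small, the frame is copied into a freshly
 * allocated skb and the original buffer is reused; otherwise a new buffer is
 * allocated and the original one is handed to build_skb(). All names below
 * are hypothetical.
 */
#if 0
static bool rx_should_copy_break(unsigned int mtu, unsigned int frame_len,
				 unsigned int copy_thresh)
{
	/* Copy only small frames, and only when large Rx buffers are in use,
	 * so the big buffer can be recycled immediately.
	 */
	return mtu > 1500 && frame_len <= copy_thresh;
}
#endif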
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1129*4882a593Smuzhiyun {
1130*4882a593Smuzhiyun 	struct bnx2x_fastpath *fp = fp_cookie;
1131*4882a593Smuzhiyun 	struct bnx2x *bp = fp->bp;
1132*4882a593Smuzhiyun 	u8 cos;
1133*4882a593Smuzhiyun 
1134*4882a593Smuzhiyun 	DP(NETIF_MSG_INTR,
1135*4882a593Smuzhiyun 	   "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1136*4882a593Smuzhiyun 	   fp->index, fp->fw_sb_id, fp->igu_sb_id);
1137*4882a593Smuzhiyun 
1138*4882a593Smuzhiyun 	bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
1141*4882a593Smuzhiyun 	if (unlikely(bp->panic))
1142*4882a593Smuzhiyun 		return IRQ_HANDLED;
1143*4882a593Smuzhiyun #endif
1144*4882a593Smuzhiyun 
1145*4882a593Smuzhiyun 	/* Handle Rx and Tx according to MSI-X vector */
1146*4882a593Smuzhiyun 	for_each_cos_in_tx_queue(fp, cos)
1147*4882a593Smuzhiyun 		prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun 	prefetch(&fp->sb_running_index[SM_RX_ID]);
1150*4882a593Smuzhiyun 	napi_schedule_irqoff(&bnx2x_fp(bp, fp->index, napi));
1151*4882a593Smuzhiyun 
1152*4882a593Smuzhiyun 	return IRQ_HANDLED;
1153*4882a593Smuzhiyun }
1154*4882a593Smuzhiyun 
1155*4882a593Smuzhiyun /* HW Lock for shared dual port PHYs */
1156*4882a593Smuzhiyun void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1157*4882a593Smuzhiyun {
1158*4882a593Smuzhiyun 	mutex_lock(&bp->port.phy_mutex);
1159*4882a593Smuzhiyun 
1160*4882a593Smuzhiyun 	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1161*4882a593Smuzhiyun }
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun void bnx2x_release_phy_lock(struct bnx2x *bp)
1164*4882a593Smuzhiyun {
1165*4882a593Smuzhiyun 	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 	mutex_unlock(&bp->port.phy_mutex);
1168*4882a593Smuzhiyun }
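
/* Editor's note - illustrative usage sketch, not compiled. The two helpers
 * above pair a software mutex with the MDIO hardware lock; a caller is
 * expected to keep the acquire/release calls strictly balanced, as in this
 * hypothetical example.
 */
#if 0
static void example_phy_access(struct bnx2x *bp)
{
	bnx2x_acquire_phy_lock(bp);	/* takes the mutex, then the HW MDIO lock */
	/* ... PHY/MDIO register accesses would go here ... */
	bnx2x_release_phy_lock(bp);	/* drops the HW lock, then the mutex */
}
#endif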
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun /* calculates MF speed according to current linespeed and MF configuration */
1171*4882a593Smuzhiyun u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1172*4882a593Smuzhiyun {
1173*4882a593Smuzhiyun 	u16 line_speed = bp->link_vars.line_speed;
1174*4882a593Smuzhiyun 	if (IS_MF(bp)) {
1175*4882a593Smuzhiyun 		u16 maxCfg = bnx2x_extract_max_cfg(bp,
1176*4882a593Smuzhiyun 						   bp->mf_config[BP_VN(bp)]);
1177*4882a593Smuzhiyun 
1178*4882a593Smuzhiyun 		/* Calculate the current MAX line speed limit for the MF
1179*4882a593Smuzhiyun 		 * devices
1180*4882a593Smuzhiyun 		 */
1181*4882a593Smuzhiyun 		if (IS_MF_PERCENT_BW(bp))
1182*4882a593Smuzhiyun 			line_speed = (line_speed * maxCfg) / 100;
1183*4882a593Smuzhiyun 		else { /* SD mode */
1184*4882a593Smuzhiyun 			u16 vn_max_rate = maxCfg * 100;
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 			if (vn_max_rate < line_speed)
1187*4882a593Smuzhiyun 				line_speed = vn_max_rate;
1188*4882a593Smuzhiyun 		}
1189*4882a593Smuzhiyun 	}
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 	return line_speed;
1192*4882a593Smuzhiyun }
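
/* Editor's note - worked example with hypothetical numbers: on a 10000 Mbps
 * link with maxCfg == 25, percent-BW mode reports 10000 * 25 / 100 = 2500
 * Mbps, while SD mode treats maxCfg as units of 100 Mbps, giving
 * vn_max_rate = 2500 Mbps and a reported speed of min(10000, 2500) = 2500.
 */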
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun /**
1195*4882a593Smuzhiyun  * bnx2x_fill_report_data - fill link report data to report
1196*4882a593Smuzhiyun  *
1197*4882a593Smuzhiyun  * @bp:		driver handle
1198*4882a593Smuzhiyun  * @data:	link state to update
1199*4882a593Smuzhiyun  *
1200*4882a593Smuzhiyun  * It uses non-atomic bit operations because it is called under the mutex.
1201*4882a593Smuzhiyun  */
1202*4882a593Smuzhiyun static void bnx2x_fill_report_data(struct bnx2x *bp,
1203*4882a593Smuzhiyun 				   struct bnx2x_link_report_data *data)
1204*4882a593Smuzhiyun {
1205*4882a593Smuzhiyun 	memset(data, 0, sizeof(*data));
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	if (IS_PF(bp)) {
1208*4882a593Smuzhiyun 		/* Fill the report data: effective line speed */
1209*4882a593Smuzhiyun 		data->line_speed = bnx2x_get_mf_speed(bp);
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 		/* Link is down */
1212*4882a593Smuzhiyun 		if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1213*4882a593Smuzhiyun 			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1214*4882a593Smuzhiyun 				  &data->link_report_flags);
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 		if (!BNX2X_NUM_ETH_QUEUES(bp))
1217*4882a593Smuzhiyun 			__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1218*4882a593Smuzhiyun 				  &data->link_report_flags);
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 		/* Full DUPLEX */
1221*4882a593Smuzhiyun 		if (bp->link_vars.duplex == DUPLEX_FULL)
1222*4882a593Smuzhiyun 			__set_bit(BNX2X_LINK_REPORT_FD,
1223*4882a593Smuzhiyun 				  &data->link_report_flags);
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun 		/* Rx Flow Control is ON */
1226*4882a593Smuzhiyun 		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1227*4882a593Smuzhiyun 			__set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1228*4882a593Smuzhiyun 				  &data->link_report_flags);
1229*4882a593Smuzhiyun 
1230*4882a593Smuzhiyun 		/* Tx Flow Control is ON */
1231*4882a593Smuzhiyun 		if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1232*4882a593Smuzhiyun 			__set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1233*4882a593Smuzhiyun 				  &data->link_report_flags);
1234*4882a593Smuzhiyun 	} else { /* VF */
1235*4882a593Smuzhiyun 		*data = bp->vf_link_vars;
1236*4882a593Smuzhiyun 	}
1237*4882a593Smuzhiyun }
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun /**
1240*4882a593Smuzhiyun  * bnx2x_link_report - report link status to OS.
1241*4882a593Smuzhiyun  *
1242*4882a593Smuzhiyun  * @bp:		driver handle
1243*4882a593Smuzhiyun  *
1244*4882a593Smuzhiyun  * Calls __bnx2x_link_report() under the same locking scheme
1245*4882a593Smuzhiyun  * as the link/PHY state management code to ensure consistent link
1246*4882a593Smuzhiyun  * reporting.
1247*4882a593Smuzhiyun  */
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun void bnx2x_link_report(struct bnx2x *bp)
1250*4882a593Smuzhiyun {
1251*4882a593Smuzhiyun 	bnx2x_acquire_phy_lock(bp);
1252*4882a593Smuzhiyun 	__bnx2x_link_report(bp);
1253*4882a593Smuzhiyun 	bnx2x_release_phy_lock(bp);
1254*4882a593Smuzhiyun }
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun /**
1257*4882a593Smuzhiyun  * __bnx2x_link_report - report link status to OS.
1258*4882a593Smuzhiyun  *
1259*4882a593Smuzhiyun  * @bp:		driver handle
1260*4882a593Smuzhiyun  *
1261*4882a593Smuzhiyun  * Non-atomic implementation.
1262*4882a593Smuzhiyun  * Should be called under the phy_lock.
1263*4882a593Smuzhiyun  */
1264*4882a593Smuzhiyun void __bnx2x_link_report(struct bnx2x *bp)
1265*4882a593Smuzhiyun {
1266*4882a593Smuzhiyun 	struct bnx2x_link_report_data cur_data;
1267*4882a593Smuzhiyun 
1268*4882a593Smuzhiyun 	if (bp->force_link_down) {
1269*4882a593Smuzhiyun 		bp->link_vars.link_up = 0;
1270*4882a593Smuzhiyun 		return;
1271*4882a593Smuzhiyun 	}
1272*4882a593Smuzhiyun 
1273*4882a593Smuzhiyun 	/* reread mf_cfg */
1274*4882a593Smuzhiyun 	if (IS_PF(bp) && !CHIP_IS_E1(bp))
1275*4882a593Smuzhiyun 		bnx2x_read_mf_cfg(bp);
1276*4882a593Smuzhiyun 
1277*4882a593Smuzhiyun 	/* Read the current link report info */
1278*4882a593Smuzhiyun 	bnx2x_fill_report_data(bp, &cur_data);
1279*4882a593Smuzhiyun 
1280*4882a593Smuzhiyun 	/* Don't report link down or exactly the same link status twice */
1281*4882a593Smuzhiyun 	if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1282*4882a593Smuzhiyun 	    (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1283*4882a593Smuzhiyun 		      &bp->last_reported_link.link_report_flags) &&
1284*4882a593Smuzhiyun 	     test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1285*4882a593Smuzhiyun 		      &cur_data.link_report_flags)))
1286*4882a593Smuzhiyun 		return;
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	bp->link_cnt++;
1289*4882a593Smuzhiyun 
1290*4882a593Smuzhiyun 	/* We are about to report new link parameters -
1291*4882a593Smuzhiyun 	 * remember the current data for next time.
1292*4882a593Smuzhiyun 	 */
1293*4882a593Smuzhiyun 	memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1294*4882a593Smuzhiyun 
1295*4882a593Smuzhiyun 	/* propagate status to VFs */
1296*4882a593Smuzhiyun 	if (IS_PF(bp))
1297*4882a593Smuzhiyun 		bnx2x_iov_link_update(bp);
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1300*4882a593Smuzhiyun 		     &cur_data.link_report_flags)) {
1301*4882a593Smuzhiyun 		netif_carrier_off(bp->dev);
1302*4882a593Smuzhiyun 		netdev_err(bp->dev, "NIC Link is Down\n");
1303*4882a593Smuzhiyun 		return;
1304*4882a593Smuzhiyun 	} else {
1305*4882a593Smuzhiyun 		const char *duplex;
1306*4882a593Smuzhiyun 		const char *flow;
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun 		netif_carrier_on(bp->dev);
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 		if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1311*4882a593Smuzhiyun 				       &cur_data.link_report_flags))
1312*4882a593Smuzhiyun 			duplex = "full";
1313*4882a593Smuzhiyun 		else
1314*4882a593Smuzhiyun 			duplex = "half";
1315*4882a593Smuzhiyun 
1316*4882a593Smuzhiyun 		/* Handle flow control at the end so that only these flags can
1317*4882a593Smuzhiyun 		 * still be set. This way we can easily check whether flow
1318*4882a593Smuzhiyun 		 * control is enabled at all.
1319*4882a593Smuzhiyun 		 */
1320*4882a593Smuzhiyun 		if (cur_data.link_report_flags) {
1321*4882a593Smuzhiyun 			if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1322*4882a593Smuzhiyun 				     &cur_data.link_report_flags)) {
1323*4882a593Smuzhiyun 				if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1324*4882a593Smuzhiyun 				     &cur_data.link_report_flags))
1325*4882a593Smuzhiyun 					flow = "ON - receive & transmit";
1326*4882a593Smuzhiyun 				else
1327*4882a593Smuzhiyun 					flow = "ON - receive";
1328*4882a593Smuzhiyun 			} else {
1329*4882a593Smuzhiyun 				flow = "ON - transmit";
1330*4882a593Smuzhiyun 			}
1331*4882a593Smuzhiyun 		} else {
1332*4882a593Smuzhiyun 			flow = "none";
1333*4882a593Smuzhiyun 		}
1334*4882a593Smuzhiyun 		netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1335*4882a593Smuzhiyun 			    cur_data.line_speed, duplex, flow);
1336*4882a593Smuzhiyun 	}
1337*4882a593Smuzhiyun }
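
/* Editor's note - illustrative sketch, not compiled. It isolates the
 * duplicate-suppression test used above: a report is emitted only when the
 * new state differs from the last one reported, and two consecutive
 * "link down" states are treated as identical even if other fields differ.
 */
#if 0
static bool link_state_changed(const struct bnx2x_link_report_data *last,
			       const struct bnx2x_link_report_data *cur)
{
	if (!memcmp(cur, last, sizeof(*cur)))
		return false;	/* bit-for-bit identical */

	if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN, &last->link_report_flags) &&
	    test_bit(BNX2X_LINK_REPORT_LINK_DOWN, &cur->link_report_flags))
		return false;	/* down -> down: nothing new to report */

	return true;
}
#endif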
1338*4882a593Smuzhiyun 
1339*4882a593Smuzhiyun static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1340*4882a593Smuzhiyun {
1341*4882a593Smuzhiyun 	int i;
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1344*4882a593Smuzhiyun 		struct eth_rx_sge *sge;
1345*4882a593Smuzhiyun 
1346*4882a593Smuzhiyun 		sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1347*4882a593Smuzhiyun 		sge->addr_hi =
1348*4882a593Smuzhiyun 			cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1349*4882a593Smuzhiyun 			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1350*4882a593Smuzhiyun 
1351*4882a593Smuzhiyun 		sge->addr_lo =
1352*4882a593Smuzhiyun 			cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1353*4882a593Smuzhiyun 			BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1354*4882a593Smuzhiyun 	}
1355*4882a593Smuzhiyun }
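
/* Editor's note - illustrative index math with hypothetical sizes: assuming
 * RX_SGE_CNT == 512 entries per page and NUM_RX_SGE_PAGES == 2, the loop
 * above writes the "next page" pointer of page 0 at ring index
 * 512 * 1 - 2 = 510 with the DMA address of page 1, and the pointer of
 * page 1 at index 1022 with the address of page (2 % 2) = 0, closing the
 * chain of SGE pages into a ring.
 */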
1356*4882a593Smuzhiyun 
1357*4882a593Smuzhiyun static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1358*4882a593Smuzhiyun 				struct bnx2x_fastpath *fp, int last)
1359*4882a593Smuzhiyun {
1360*4882a593Smuzhiyun 	int i;
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 	for (i = 0; i < last; i++) {
1363*4882a593Smuzhiyun 		struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1364*4882a593Smuzhiyun 		struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1365*4882a593Smuzhiyun 		u8 *data = first_buf->data;
1366*4882a593Smuzhiyun 
1367*4882a593Smuzhiyun 		if (data == NULL) {
1368*4882a593Smuzhiyun 			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1369*4882a593Smuzhiyun 			continue;
1370*4882a593Smuzhiyun 		}
1371*4882a593Smuzhiyun 		if (tpa_info->tpa_state == BNX2X_TPA_START)
1372*4882a593Smuzhiyun 			dma_unmap_single(&bp->pdev->dev,
1373*4882a593Smuzhiyun 					 dma_unmap_addr(first_buf, mapping),
1374*4882a593Smuzhiyun 					 fp->rx_buf_size, DMA_FROM_DEVICE);
1375*4882a593Smuzhiyun 		bnx2x_frag_free(fp, data);
1376*4882a593Smuzhiyun 		first_buf->data = NULL;
1377*4882a593Smuzhiyun 	}
1378*4882a593Smuzhiyun }
1379*4882a593Smuzhiyun 
1380*4882a593Smuzhiyun void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1381*4882a593Smuzhiyun {
1382*4882a593Smuzhiyun 	int j;
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun 	for_each_rx_queue_cnic(bp, j) {
1385*4882a593Smuzhiyun 		struct bnx2x_fastpath *fp = &bp->fp[j];
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 		fp->rx_bd_cons = 0;
1388*4882a593Smuzhiyun 
1389*4882a593Smuzhiyun 		/* Activate BD ring */
1390*4882a593Smuzhiyun 		/* Warning!
1391*4882a593Smuzhiyun 		 * This will generate an interrupt (to the TSTORM);
1392*4882a593Smuzhiyun 		 * it must only be done after the chip is initialized.
1393*4882a593Smuzhiyun 		 */
1394*4882a593Smuzhiyun 		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1395*4882a593Smuzhiyun 				     fp->rx_sge_prod);
1396*4882a593Smuzhiyun 	}
1397*4882a593Smuzhiyun }
1398*4882a593Smuzhiyun 
1399*4882a593Smuzhiyun void bnx2x_init_rx_rings(struct bnx2x *bp)
1400*4882a593Smuzhiyun {
1401*4882a593Smuzhiyun 	int func = BP_FUNC(bp);
1402*4882a593Smuzhiyun 	u16 ring_prod;
1403*4882a593Smuzhiyun 	int i, j;
1404*4882a593Smuzhiyun 
1405*4882a593Smuzhiyun 	/* Allocate TPA resources */
1406*4882a593Smuzhiyun 	for_each_eth_queue(bp, j) {
1407*4882a593Smuzhiyun 		struct bnx2x_fastpath *fp = &bp->fp[j];
1408*4882a593Smuzhiyun 
1409*4882a593Smuzhiyun 		DP(NETIF_MSG_IFUP,
1410*4882a593Smuzhiyun 		   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1411*4882a593Smuzhiyun 
1412*4882a593Smuzhiyun 		if (fp->mode != TPA_MODE_DISABLED) {
1413*4882a593Smuzhiyun 			/* Fill the per-aggregation pool */
1414*4882a593Smuzhiyun 			for (i = 0; i < MAX_AGG_QS(bp); i++) {
1415*4882a593Smuzhiyun 				struct bnx2x_agg_info *tpa_info =
1416*4882a593Smuzhiyun 					&fp->tpa_info[i];
1417*4882a593Smuzhiyun 				struct sw_rx_bd *first_buf =
1418*4882a593Smuzhiyun 					&tpa_info->first_buf;
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 				first_buf->data =
1421*4882a593Smuzhiyun 					bnx2x_frag_alloc(fp, GFP_KERNEL);
1422*4882a593Smuzhiyun 				if (!first_buf->data) {
1423*4882a593Smuzhiyun 					BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1424*4882a593Smuzhiyun 						  j);
1425*4882a593Smuzhiyun 					bnx2x_free_tpa_pool(bp, fp, i);
1426*4882a593Smuzhiyun 					fp->mode = TPA_MODE_DISABLED;
1427*4882a593Smuzhiyun 					break;
1428*4882a593Smuzhiyun 				}
1429*4882a593Smuzhiyun 				dma_unmap_addr_set(first_buf, mapping, 0);
1430*4882a593Smuzhiyun 				tpa_info->tpa_state = BNX2X_TPA_STOP;
1431*4882a593Smuzhiyun 			}
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 			/* "next page" elements initialization */
1434*4882a593Smuzhiyun 			bnx2x_set_next_page_sgl(fp);
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 			/* set SGEs bit mask */
1437*4882a593Smuzhiyun 			bnx2x_init_sge_ring_bit_mask(fp);
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun 			/* Allocate SGEs and initialize the ring elements */
1440*4882a593Smuzhiyun 			for (i = 0, ring_prod = 0;
1441*4882a593Smuzhiyun 			     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1442*4882a593Smuzhiyun 
1443*4882a593Smuzhiyun 				if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1444*4882a593Smuzhiyun 						       GFP_KERNEL) < 0) {
1445*4882a593Smuzhiyun 					BNX2X_ERR("was only able to allocate %d rx sges\n",
1446*4882a593Smuzhiyun 						  i);
1447*4882a593Smuzhiyun 					BNX2X_ERR("disabling TPA for queue[%d]\n",
1448*4882a593Smuzhiyun 						  j);
1449*4882a593Smuzhiyun 					/* Cleanup already allocated elements */
1450*4882a593Smuzhiyun 					bnx2x_free_rx_sge_range(bp, fp,
1451*4882a593Smuzhiyun 								ring_prod);
1452*4882a593Smuzhiyun 					bnx2x_free_tpa_pool(bp, fp,
1453*4882a593Smuzhiyun 							    MAX_AGG_QS(bp));
1454*4882a593Smuzhiyun 					fp->mode = TPA_MODE_DISABLED;
1455*4882a593Smuzhiyun 					ring_prod = 0;
1456*4882a593Smuzhiyun 					break;
1457*4882a593Smuzhiyun 				}
1458*4882a593Smuzhiyun 				ring_prod = NEXT_SGE_IDX(ring_prod);
1459*4882a593Smuzhiyun 			}
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun 			fp->rx_sge_prod = ring_prod;
1462*4882a593Smuzhiyun 		}
1463*4882a593Smuzhiyun 	}
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun 	for_each_eth_queue(bp, j) {
1466*4882a593Smuzhiyun 		struct bnx2x_fastpath *fp = &bp->fp[j];
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun 		fp->rx_bd_cons = 0;
1469*4882a593Smuzhiyun 
1470*4882a593Smuzhiyun 		/* Activate BD ring */
1471*4882a593Smuzhiyun 		/* Warning!
1472*4882a593Smuzhiyun 		 * This will generate an interrupt (to the TSTORM);
1473*4882a593Smuzhiyun 		 * it must only be done after the chip is initialized.
1474*4882a593Smuzhiyun 		 */
1475*4882a593Smuzhiyun 		bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1476*4882a593Smuzhiyun 				     fp->rx_sge_prod);
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 		if (j != 0)
1479*4882a593Smuzhiyun 			continue;
1480*4882a593Smuzhiyun 
1481*4882a593Smuzhiyun 		if (CHIP_IS_E1(bp)) {
1482*4882a593Smuzhiyun 			REG_WR(bp, BAR_USTRORM_INTMEM +
1483*4882a593Smuzhiyun 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1484*4882a593Smuzhiyun 			       U64_LO(fp->rx_comp_mapping));
1485*4882a593Smuzhiyun 			REG_WR(bp, BAR_USTRORM_INTMEM +
1486*4882a593Smuzhiyun 			       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1487*4882a593Smuzhiyun 			       U64_HI(fp->rx_comp_mapping));
1488*4882a593Smuzhiyun 		}
1489*4882a593Smuzhiyun 	}
1490*4882a593Smuzhiyun }
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1493*4882a593Smuzhiyun {
1494*4882a593Smuzhiyun 	u8 cos;
1495*4882a593Smuzhiyun 	struct bnx2x *bp = fp->bp;
1496*4882a593Smuzhiyun 
1497*4882a593Smuzhiyun 	for_each_cos_in_tx_queue(fp, cos) {
1498*4882a593Smuzhiyun 		struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1499*4882a593Smuzhiyun 		unsigned pkts_compl = 0, bytes_compl = 0;
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun 		u16 sw_prod = txdata->tx_pkt_prod;
1502*4882a593Smuzhiyun 		u16 sw_cons = txdata->tx_pkt_cons;
1503*4882a593Smuzhiyun 
1504*4882a593Smuzhiyun 		while (sw_cons != sw_prod) {
1505*4882a593Smuzhiyun 			bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1506*4882a593Smuzhiyun 					  &pkts_compl, &bytes_compl);
1507*4882a593Smuzhiyun 			sw_cons++;
1508*4882a593Smuzhiyun 		}
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun 		netdev_tx_reset_queue(
1511*4882a593Smuzhiyun 			netdev_get_tx_queue(bp->dev,
1512*4882a593Smuzhiyun 					    txdata->txq_index));
1513*4882a593Smuzhiyun 	}
1514*4882a593Smuzhiyun }
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1517*4882a593Smuzhiyun {
1518*4882a593Smuzhiyun 	int i;
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 	for_each_tx_queue_cnic(bp, i) {
1521*4882a593Smuzhiyun 		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1522*4882a593Smuzhiyun 	}
1523*4882a593Smuzhiyun }
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1526*4882a593Smuzhiyun {
1527*4882a593Smuzhiyun 	int i;
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	for_each_eth_queue(bp, i) {
1530*4882a593Smuzhiyun 		bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1531*4882a593Smuzhiyun 	}
1532*4882a593Smuzhiyun }
1533*4882a593Smuzhiyun 
1534*4882a593Smuzhiyun static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1535*4882a593Smuzhiyun {
1536*4882a593Smuzhiyun 	struct bnx2x *bp = fp->bp;
1537*4882a593Smuzhiyun 	int i;
1538*4882a593Smuzhiyun 
1539*4882a593Smuzhiyun 	/* ring wasn't allocated */
1540*4882a593Smuzhiyun 	if (fp->rx_buf_ring == NULL)
1541*4882a593Smuzhiyun 		return;
1542*4882a593Smuzhiyun 
1543*4882a593Smuzhiyun 	for (i = 0; i < NUM_RX_BD; i++) {
1544*4882a593Smuzhiyun 		struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1545*4882a593Smuzhiyun 		u8 *data = rx_buf->data;
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 		if (data == NULL)
1548*4882a593Smuzhiyun 			continue;
1549*4882a593Smuzhiyun 		dma_unmap_single(&bp->pdev->dev,
1550*4882a593Smuzhiyun 				 dma_unmap_addr(rx_buf, mapping),
1551*4882a593Smuzhiyun 				 fp->rx_buf_size, DMA_FROM_DEVICE);
1552*4882a593Smuzhiyun 
1553*4882a593Smuzhiyun 		rx_buf->data = NULL;
1554*4882a593Smuzhiyun 		bnx2x_frag_free(fp, data);
1555*4882a593Smuzhiyun 	}
1556*4882a593Smuzhiyun }
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1559*4882a593Smuzhiyun {
1560*4882a593Smuzhiyun 	int j;
1561*4882a593Smuzhiyun 
1562*4882a593Smuzhiyun 	for_each_rx_queue_cnic(bp, j) {
1563*4882a593Smuzhiyun 		bnx2x_free_rx_bds(&bp->fp[j]);
1564*4882a593Smuzhiyun 	}
1565*4882a593Smuzhiyun }
1566*4882a593Smuzhiyun 
1567*4882a593Smuzhiyun static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1568*4882a593Smuzhiyun {
1569*4882a593Smuzhiyun 	int j;
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun 	for_each_eth_queue(bp, j) {
1572*4882a593Smuzhiyun 		struct bnx2x_fastpath *fp = &bp->fp[j];
1573*4882a593Smuzhiyun 
1574*4882a593Smuzhiyun 		bnx2x_free_rx_bds(fp);
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun 		if (fp->mode != TPA_MODE_DISABLED)
1577*4882a593Smuzhiyun 			bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1578*4882a593Smuzhiyun 	}
1579*4882a593Smuzhiyun }
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1582*4882a593Smuzhiyun {
1583*4882a593Smuzhiyun 	bnx2x_free_tx_skbs_cnic(bp);
1584*4882a593Smuzhiyun 	bnx2x_free_rx_skbs_cnic(bp);
1585*4882a593Smuzhiyun }
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun void bnx2x_free_skbs(struct bnx2x *bp)
1588*4882a593Smuzhiyun {
1589*4882a593Smuzhiyun 	bnx2x_free_tx_skbs(bp);
1590*4882a593Smuzhiyun 	bnx2x_free_rx_skbs(bp);
1591*4882a593Smuzhiyun }
1592*4882a593Smuzhiyun 
1593*4882a593Smuzhiyun void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1594*4882a593Smuzhiyun {
1595*4882a593Smuzhiyun 	/* load old values */
1596*4882a593Smuzhiyun 	u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun 	if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1599*4882a593Smuzhiyun 		/* keep all fields except the MAX value */
1600*4882a593Smuzhiyun 		mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun 		/* set new MAX value */
1603*4882a593Smuzhiyun 		mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1604*4882a593Smuzhiyun 				& FUNC_MF_CFG_MAX_BW_MASK;
1605*4882a593Smuzhiyun 
1606*4882a593Smuzhiyun 		bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1607*4882a593Smuzhiyun 	}
1608*4882a593Smuzhiyun }
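
/* Editor's note - illustrative sketch, not compiled. The update above is a
 * standard read-modify-write of a packed bit field: clear the field with its
 * mask, then OR in the new value shifted into position. Generic form with
 * hypothetical parameters:
 */
#if 0
static u32 update_bitfield(u32 word, u32 value, u32 mask, u32 shift)
{
	word &= ~mask;				/* clear the old field */
	word |= (value << shift) & mask;	/* insert the new value */
	return word;
}
#endif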
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun /**
1611*4882a593Smuzhiyun  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1612*4882a593Smuzhiyun  *
1613*4882a593Smuzhiyun  * @bp:		driver handle
1614*4882a593Smuzhiyun  * @nvecs:	number of vectors to be released
1615*4882a593Smuzhiyun  */
1616*4882a593Smuzhiyun static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1617*4882a593Smuzhiyun {
1618*4882a593Smuzhiyun 	int i, offset = 0;
1619*4882a593Smuzhiyun 
1620*4882a593Smuzhiyun 	if (nvecs == offset)
1621*4882a593Smuzhiyun 		return;
1622*4882a593Smuzhiyun 
1623*4882a593Smuzhiyun 	/* VFs don't have a default SB */
1624*4882a593Smuzhiyun 	if (IS_PF(bp)) {
1625*4882a593Smuzhiyun 		free_irq(bp->msix_table[offset].vector, bp->dev);
1626*4882a593Smuzhiyun 		DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1627*4882a593Smuzhiyun 		   bp->msix_table[offset].vector);
1628*4882a593Smuzhiyun 		offset++;
1629*4882a593Smuzhiyun 	}
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 	if (CNIC_SUPPORT(bp)) {
1632*4882a593Smuzhiyun 		if (nvecs == offset)
1633*4882a593Smuzhiyun 			return;
1634*4882a593Smuzhiyun 		offset++;
1635*4882a593Smuzhiyun 	}
1636*4882a593Smuzhiyun 
1637*4882a593Smuzhiyun 	for_each_eth_queue(bp, i) {
1638*4882a593Smuzhiyun 		if (nvecs == offset)
1639*4882a593Smuzhiyun 			return;
1640*4882a593Smuzhiyun 		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1641*4882a593Smuzhiyun 		   i, bp->msix_table[offset].vector);
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun 		free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1644*4882a593Smuzhiyun 	}
1645*4882a593Smuzhiyun }
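
/* Editor's note - illustrative summary of the vector layout mirrored by the
 * offset arithmetic above, for a PF with CNIC support and N ETH queues:
 *
 *	msix_table[0]		slowpath (default status block)
 *	msix_table[1]		CNIC
 *	msix_table[2..N+1]	fastpath queues 0..N-1
 *
 * VFs have no default status block, so their table starts directly at the
 * CNIC or fastpath entries instead.
 */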
1646*4882a593Smuzhiyun 
1647*4882a593Smuzhiyun void bnx2x_free_irq(struct bnx2x *bp)
1648*4882a593Smuzhiyun {
1649*4882a593Smuzhiyun 	if (bp->flags & USING_MSIX_FLAG &&
1650*4882a593Smuzhiyun 	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1651*4882a593Smuzhiyun 		int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1652*4882a593Smuzhiyun 
1653*4882a593Smuzhiyun 		/* vfs don't have a default status block */
1654*4882a593Smuzhiyun 		if (IS_PF(bp))
1655*4882a593Smuzhiyun 			nvecs++;
1656*4882a593Smuzhiyun 
1657*4882a593Smuzhiyun 		bnx2x_free_msix_irqs(bp, nvecs);
1658*4882a593Smuzhiyun 	} else {
1659*4882a593Smuzhiyun 		free_irq(bp->dev->irq, bp->dev);
1660*4882a593Smuzhiyun 	}
1661*4882a593Smuzhiyun }
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun int bnx2x_enable_msix(struct bnx2x *bp)
1664*4882a593Smuzhiyun {
1665*4882a593Smuzhiyun 	int msix_vec = 0, i, rc;
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 	/* VFs don't have a default status block */
1668*4882a593Smuzhiyun 	if (IS_PF(bp)) {
1669*4882a593Smuzhiyun 		bp->msix_table[msix_vec].entry = msix_vec;
1670*4882a593Smuzhiyun 		BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1671*4882a593Smuzhiyun 			       bp->msix_table[0].entry);
1672*4882a593Smuzhiyun 		msix_vec++;
1673*4882a593Smuzhiyun 	}
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun 	/* CNIC requires an MSI-X vector for itself */
1676*4882a593Smuzhiyun 	if (CNIC_SUPPORT(bp)) {
1677*4882a593Smuzhiyun 		bp->msix_table[msix_vec].entry = msix_vec;
1678*4882a593Smuzhiyun 		BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1679*4882a593Smuzhiyun 			       msix_vec, bp->msix_table[msix_vec].entry);
1680*4882a593Smuzhiyun 		msix_vec++;
1681*4882a593Smuzhiyun 	}
1682*4882a593Smuzhiyun 
1683*4882a593Smuzhiyun 	/* We need separate vectors for ETH queues only (not FCoE) */
1684*4882a593Smuzhiyun 	for_each_eth_queue(bp, i) {
1685*4882a593Smuzhiyun 		bp->msix_table[msix_vec].entry = msix_vec;
1686*4882a593Smuzhiyun 		BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1687*4882a593Smuzhiyun 			       msix_vec, msix_vec, i);
1688*4882a593Smuzhiyun 		msix_vec++;
1689*4882a593Smuzhiyun 	}
1690*4882a593Smuzhiyun 
1691*4882a593Smuzhiyun 	DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1692*4882a593Smuzhiyun 	   msix_vec);
1693*4882a593Smuzhiyun 
1694*4882a593Smuzhiyun 	rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1695*4882a593Smuzhiyun 				   BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1696*4882a593Smuzhiyun 	/*
1697*4882a593Smuzhiyun 	 * reconfigure number of tx/rx queues according to available
1698*4882a593Smuzhiyun 	 * MSI-X vectors
1699*4882a593Smuzhiyun 	 */
1700*4882a593Smuzhiyun 	if (rc == -ENOSPC) {
1701*4882a593Smuzhiyun 		/* Get by with single vector */
1702*4882a593Smuzhiyun 		rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1703*4882a593Smuzhiyun 		if (rc < 0) {
1704*4882a593Smuzhiyun 			BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1705*4882a593Smuzhiyun 				       rc);
1706*4882a593Smuzhiyun 			goto no_msix;
1707*4882a593Smuzhiyun 		}
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun 		BNX2X_DEV_INFO("Using single MSI-X vector\n");
1710*4882a593Smuzhiyun 		bp->flags |= USING_SINGLE_MSIX_FLAG;
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun 		BNX2X_DEV_INFO("set number of queues to 1\n");
1713*4882a593Smuzhiyun 		bp->num_ethernet_queues = 1;
1714*4882a593Smuzhiyun 		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1715*4882a593Smuzhiyun 	} else if (rc < 0) {
1716*4882a593Smuzhiyun 		BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1717*4882a593Smuzhiyun 		goto no_msix;
1718*4882a593Smuzhiyun 	} else if (rc < msix_vec) {
1719*4882a593Smuzhiyun 		/* how many fewer vectors will we have? */
1720*4882a593Smuzhiyun 		int diff = msix_vec - rc;
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 		BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1723*4882a593Smuzhiyun 
1724*4882a593Smuzhiyun 		/*
1725*4882a593Smuzhiyun 		 * decrease number of queues by number of unallocated entries
1726*4882a593Smuzhiyun 		 */
1727*4882a593Smuzhiyun 		bp->num_ethernet_queues -= diff;
1728*4882a593Smuzhiyun 		bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1729*4882a593Smuzhiyun 
1730*4882a593Smuzhiyun 		BNX2X_DEV_INFO("New queue configuration set: %d\n",
1731*4882a593Smuzhiyun 			       bp->num_queues);
1732*4882a593Smuzhiyun 	}
1733*4882a593Smuzhiyun 
1734*4882a593Smuzhiyun 	bp->flags |= USING_MSIX_FLAG;
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun 	return 0;
1737*4882a593Smuzhiyun 
1738*4882a593Smuzhiyun no_msix:
1739*4882a593Smuzhiyun 	/* fall back to INTx if there is not enough memory */
1740*4882a593Smuzhiyun 	if (rc == -ENOMEM)
1741*4882a593Smuzhiyun 		bp->flags |= DISABLE_MSI_FLAG;
1742*4882a593Smuzhiyun 
1743*4882a593Smuzhiyun 	return rc;
1744*4882a593Smuzhiyun }
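
/* Editor's note - illustrative sketch, not compiled. When fewer MSI-X
 * vectors are granted than requested, the code above shrinks the ETH queue
 * count by the shortfall; the hypothetical helper below restates that
 * adjustment using the same fields.
 */
#if 0
static void example_shrink_eth_queues(struct bnx2x *bp, int requested,
				      int granted)
{
	int diff = requested - granted;

	if (diff > 0) {
		bp->num_ethernet_queues -= diff;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
	}
}
#endif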
1745*4882a593Smuzhiyun 
1746*4882a593Smuzhiyun static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1747*4882a593Smuzhiyun {
1748*4882a593Smuzhiyun 	int i, rc, offset = 0;
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun 	/* no default status block for vf */
1751*4882a593Smuzhiyun 	if (IS_PF(bp)) {
1752*4882a593Smuzhiyun 		rc = request_irq(bp->msix_table[offset++].vector,
1753*4882a593Smuzhiyun 				 bnx2x_msix_sp_int, 0,
1754*4882a593Smuzhiyun 				 bp->dev->name, bp->dev);
1755*4882a593Smuzhiyun 		if (rc) {
1756*4882a593Smuzhiyun 			BNX2X_ERR("request sp irq failed\n");
1757*4882a593Smuzhiyun 			return -EBUSY;
1758*4882a593Smuzhiyun 		}
1759*4882a593Smuzhiyun 	}
1760*4882a593Smuzhiyun 
1761*4882a593Smuzhiyun 	if (CNIC_SUPPORT(bp))
1762*4882a593Smuzhiyun 		offset++;
1763*4882a593Smuzhiyun 
1764*4882a593Smuzhiyun 	for_each_eth_queue(bp, i) {
1765*4882a593Smuzhiyun 		struct bnx2x_fastpath *fp = &bp->fp[i];
1766*4882a593Smuzhiyun 		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1767*4882a593Smuzhiyun 			 bp->dev->name, i);
1768*4882a593Smuzhiyun 
1769*4882a593Smuzhiyun 		rc = request_irq(bp->msix_table[offset].vector,
1770*4882a593Smuzhiyun 				 bnx2x_msix_fp_int, 0, fp->name, fp);
1771*4882a593Smuzhiyun 		if (rc) {
1772*4882a593Smuzhiyun 			BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1773*4882a593Smuzhiyun 			      bp->msix_table[offset].vector, rc);
1774*4882a593Smuzhiyun 			bnx2x_free_msix_irqs(bp, offset);
1775*4882a593Smuzhiyun 			return -EBUSY;
1776*4882a593Smuzhiyun 		}
1777*4882a593Smuzhiyun 
1778*4882a593Smuzhiyun 		offset++;
1779*4882a593Smuzhiyun 	}
1780*4882a593Smuzhiyun 
1781*4882a593Smuzhiyun 	i = BNX2X_NUM_ETH_QUEUES(bp);
1782*4882a593Smuzhiyun 	if (IS_PF(bp)) {
1783*4882a593Smuzhiyun 		offset = 1 + CNIC_SUPPORT(bp);
1784*4882a593Smuzhiyun 		netdev_info(bp->dev,
1785*4882a593Smuzhiyun 			    "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
1786*4882a593Smuzhiyun 			    bp->msix_table[0].vector,
1787*4882a593Smuzhiyun 			    0, bp->msix_table[offset].vector,
1788*4882a593Smuzhiyun 			    i - 1, bp->msix_table[offset + i - 1].vector);
1789*4882a593Smuzhiyun 	} else {
1790*4882a593Smuzhiyun 		offset = CNIC_SUPPORT(bp);
1791*4882a593Smuzhiyun 		netdev_info(bp->dev,
1792*4882a593Smuzhiyun 			    "using MSI-X  IRQs: fp[%d] %d ... fp[%d] %d\n",
1793*4882a593Smuzhiyun 			    0, bp->msix_table[offset].vector,
1794*4882a593Smuzhiyun 			    i - 1, bp->msix_table[offset + i - 1].vector);
1795*4882a593Smuzhiyun 	}
1796*4882a593Smuzhiyun 	return 0;
1797*4882a593Smuzhiyun }
1798*4882a593Smuzhiyun 
1799*4882a593Smuzhiyun int bnx2x_enable_msi(struct bnx2x *bp)
1800*4882a593Smuzhiyun {
1801*4882a593Smuzhiyun 	int rc;
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun 	rc = pci_enable_msi(bp->pdev);
1804*4882a593Smuzhiyun 	if (rc) {
1805*4882a593Smuzhiyun 		BNX2X_DEV_INFO("MSI is not attainable\n");
1806*4882a593Smuzhiyun 		return -1;
1807*4882a593Smuzhiyun 	}
1808*4882a593Smuzhiyun 	bp->flags |= USING_MSI_FLAG;
1809*4882a593Smuzhiyun 
1810*4882a593Smuzhiyun 	return 0;
1811*4882a593Smuzhiyun }
1812*4882a593Smuzhiyun 
1813*4882a593Smuzhiyun static int bnx2x_req_irq(struct bnx2x *bp)
1814*4882a593Smuzhiyun {
1815*4882a593Smuzhiyun 	unsigned long flags;
1816*4882a593Smuzhiyun 	unsigned int irq;
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun 	if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1819*4882a593Smuzhiyun 		flags = 0;
1820*4882a593Smuzhiyun 	else
1821*4882a593Smuzhiyun 		flags = IRQF_SHARED;
1822*4882a593Smuzhiyun 
1823*4882a593Smuzhiyun 	if (bp->flags & USING_MSIX_FLAG)
1824*4882a593Smuzhiyun 		irq = bp->msix_table[0].vector;
1825*4882a593Smuzhiyun 	else
1826*4882a593Smuzhiyun 		irq = bp->pdev->irq;
1827*4882a593Smuzhiyun 
1828*4882a593Smuzhiyun 	return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1829*4882a593Smuzhiyun }
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun static int bnx2x_setup_irqs(struct bnx2x *bp)
1832*4882a593Smuzhiyun {
1833*4882a593Smuzhiyun 	int rc = 0;
1834*4882a593Smuzhiyun 	if (bp->flags & USING_MSIX_FLAG &&
1835*4882a593Smuzhiyun 	    !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1836*4882a593Smuzhiyun 		rc = bnx2x_req_msix_irqs(bp);
1837*4882a593Smuzhiyun 		if (rc)
1838*4882a593Smuzhiyun 			return rc;
1839*4882a593Smuzhiyun 	} else {
1840*4882a593Smuzhiyun 		rc = bnx2x_req_irq(bp);
1841*4882a593Smuzhiyun 		if (rc) {
1842*4882a593Smuzhiyun 			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
1843*4882a593Smuzhiyun 			return rc;
1844*4882a593Smuzhiyun 		}
1845*4882a593Smuzhiyun 		if (bp->flags & USING_MSI_FLAG) {
1846*4882a593Smuzhiyun 			bp->dev->irq = bp->pdev->irq;
1847*4882a593Smuzhiyun 			netdev_info(bp->dev, "using MSI IRQ %d\n",
1848*4882a593Smuzhiyun 				    bp->dev->irq);
1849*4882a593Smuzhiyun 		}
1850*4882a593Smuzhiyun 		if (bp->flags & USING_MSIX_FLAG) {
1851*4882a593Smuzhiyun 			bp->dev->irq = bp->msix_table[0].vector;
1852*4882a593Smuzhiyun 			netdev_info(bp->dev, "using MSIX IRQ %d\n",
1853*4882a593Smuzhiyun 				    bp->dev->irq);
1854*4882a593Smuzhiyun 		}
1855*4882a593Smuzhiyun 	}
1856*4882a593Smuzhiyun 
1857*4882a593Smuzhiyun 	return 0;
1858*4882a593Smuzhiyun }
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1861*4882a593Smuzhiyun {
1862*4882a593Smuzhiyun 	int i;
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 	for_each_rx_queue_cnic(bp, i) {
1865*4882a593Smuzhiyun 		napi_enable(&bnx2x_fp(bp, i, napi));
1866*4882a593Smuzhiyun 	}
1867*4882a593Smuzhiyun }
1868*4882a593Smuzhiyun 
1869*4882a593Smuzhiyun static void bnx2x_napi_enable(struct bnx2x *bp)
1870*4882a593Smuzhiyun {
1871*4882a593Smuzhiyun 	int i;
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun 	for_each_eth_queue(bp, i) {
1874*4882a593Smuzhiyun 		napi_enable(&bnx2x_fp(bp, i, napi));
1875*4882a593Smuzhiyun 	}
1876*4882a593Smuzhiyun }
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1879*4882a593Smuzhiyun {
1880*4882a593Smuzhiyun 	int i;
1881*4882a593Smuzhiyun 
1882*4882a593Smuzhiyun 	for_each_rx_queue_cnic(bp, i) {
1883*4882a593Smuzhiyun 		napi_disable(&bnx2x_fp(bp, i, napi));
1884*4882a593Smuzhiyun 	}
1885*4882a593Smuzhiyun }
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun static void bnx2x_napi_disable(struct bnx2x *bp)
1888*4882a593Smuzhiyun {
1889*4882a593Smuzhiyun 	int i;
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 	for_each_eth_queue(bp, i) {
1892*4882a593Smuzhiyun 		napi_disable(&bnx2x_fp(bp, i, napi));
1893*4882a593Smuzhiyun 	}
1894*4882a593Smuzhiyun }
1895*4882a593Smuzhiyun 
1896*4882a593Smuzhiyun void bnx2x_netif_start(struct bnx2x *bp)
1897*4882a593Smuzhiyun {
1898*4882a593Smuzhiyun 	if (netif_running(bp->dev)) {
1899*4882a593Smuzhiyun 		bnx2x_napi_enable(bp);
1900*4882a593Smuzhiyun 		if (CNIC_LOADED(bp))
1901*4882a593Smuzhiyun 			bnx2x_napi_enable_cnic(bp);
1902*4882a593Smuzhiyun 		bnx2x_int_enable(bp);
1903*4882a593Smuzhiyun 		if (bp->state == BNX2X_STATE_OPEN)
1904*4882a593Smuzhiyun 			netif_tx_wake_all_queues(bp->dev);
1905*4882a593Smuzhiyun 	}
1906*4882a593Smuzhiyun }
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1909*4882a593Smuzhiyun {
1910*4882a593Smuzhiyun 	bnx2x_int_disable_sync(bp, disable_hw);
1911*4882a593Smuzhiyun 	bnx2x_napi_disable(bp);
1912*4882a593Smuzhiyun 	if (CNIC_LOADED(bp))
1913*4882a593Smuzhiyun 		bnx2x_napi_disable_cnic(bp);
1914*4882a593Smuzhiyun }
1915*4882a593Smuzhiyun 
1916*4882a593Smuzhiyun u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1917*4882a593Smuzhiyun 		       struct net_device *sb_dev)
1918*4882a593Smuzhiyun {
1919*4882a593Smuzhiyun 	struct bnx2x *bp = netdev_priv(dev);
1920*4882a593Smuzhiyun 
1921*4882a593Smuzhiyun 	if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1922*4882a593Smuzhiyun 		struct ethhdr *hdr = (struct ethhdr *)skb->data;
1923*4882a593Smuzhiyun 		u16 ether_type = ntohs(hdr->h_proto);
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun 		/* Skip VLAN tag if present */
1926*4882a593Smuzhiyun 		if (ether_type == ETH_P_8021Q) {
1927*4882a593Smuzhiyun 			struct vlan_ethhdr *vhdr =
1928*4882a593Smuzhiyun 				(struct vlan_ethhdr *)skb->data;
1929*4882a593Smuzhiyun 
1930*4882a593Smuzhiyun 			ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1931*4882a593Smuzhiyun 		}
1932*4882a593Smuzhiyun 
1933*4882a593Smuzhiyun 		/* If ethertype is FCoE or FIP - use FCoE ring */
1934*4882a593Smuzhiyun 		if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1935*4882a593Smuzhiyun 			return bnx2x_fcoe_tx(bp, txq_index);
1936*4882a593Smuzhiyun 	}
1937*4882a593Smuzhiyun 
1938*4882a593Smuzhiyun 	/* select a non-FCoE queue */
1939*4882a593Smuzhiyun 	return netdev_pick_tx(dev, skb, NULL) %
1940*4882a593Smuzhiyun 			(BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos);
1941*4882a593Smuzhiyun }
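
/* Editor's note - illustrative sketch, not compiled. A simplified stand-in
 * for the selection policy above: FCoE/FIP frames go to the dedicated FCoE
 * ring, everything else is hashed into the regular ETH L2 span of
 * num_eth_queues * max_cos indices. The hash source and names are
 * hypothetical.
 */
#if 0
static u16 example_pick_tx_queue(u32 hash, u16 num_eth_queues, u16 max_cos,
				 bool is_fcoe_frame, u16 fcoe_txq_index)
{
	if (is_fcoe_frame)
		return fcoe_txq_index;		/* dedicated FCoE ring */

	return hash % (num_eth_queues * max_cos);	/* regular L2 span */
}
#endif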
1942*4882a593Smuzhiyun 
1943*4882a593Smuzhiyun void bnx2x_set_num_queues(struct bnx2x *bp)
1944*4882a593Smuzhiyun {
1945*4882a593Smuzhiyun 	/* RSS queues */
1946*4882a593Smuzhiyun 	bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun 	/* override in STORAGE SD modes */
1949*4882a593Smuzhiyun 	if (IS_MF_STORAGE_ONLY(bp))
1950*4882a593Smuzhiyun 		bp->num_ethernet_queues = 1;
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun 	/* Add special queues */
1953*4882a593Smuzhiyun 	bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1954*4882a593Smuzhiyun 	bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1955*4882a593Smuzhiyun 
1956*4882a593Smuzhiyun 	BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1957*4882a593Smuzhiyun }
1958*4882a593Smuzhiyun 
1959*4882a593Smuzhiyun /**
1960*4882a593Smuzhiyun  * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1961*4882a593Smuzhiyun  *
1962*4882a593Smuzhiyun  * @bp:		Driver handle
1963*4882a593Smuzhiyun  * @include_cnic: handle cnic case
1964*4882a593Smuzhiyun  *
1965*4882a593Smuzhiyun  * We currently support at most 16 Tx queues for each CoS, thus we
1966*4882a593Smuzhiyun  * allocate a multiple of 16 ETH L2 rings according to the value of
1967*4882a593Smuzhiyun  * bp->max_cos.
1968*4882a593Smuzhiyun  *
1969*4882a593Smuzhiyun  * If there is an FCoE L2 queue the appropriate Tx queue will have the next
1970*4882a593Smuzhiyun  * index after all ETH L2 indices.
1971*4882a593Smuzhiyun  *
1972*4882a593Smuzhiyun  * If the actual number of Tx queues (for each CoS) is less than 16, there
1973*4882a593Smuzhiyun  * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1974*4882a593Smuzhiyun  * 16..31, ...) with indices that are not coupled with any real Tx queue.
1975*4882a593Smuzhiyun  *
1976*4882a593Smuzhiyun  * The proper configuration of skb->queue_mapping is handled by
1977*4882a593Smuzhiyun  * bnx2x_select_queue() and __skb_tx_hash().
1978*4882a593Smuzhiyun  *
1979*4882a593Smuzhiyun  * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1980*4882a593Smuzhiyun  * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1981*4882a593Smuzhiyun  */
1982*4882a593Smuzhiyun static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1983*4882a593Smuzhiyun {
1984*4882a593Smuzhiyun 	int rc, tx, rx;
1985*4882a593Smuzhiyun 
1986*4882a593Smuzhiyun 	tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1987*4882a593Smuzhiyun 	rx = BNX2X_NUM_ETH_QUEUES(bp);
1988*4882a593Smuzhiyun 
1989*4882a593Smuzhiyun /* account for the FCoE queue */
1990*4882a593Smuzhiyun 	if (include_cnic && !NO_FCOE(bp)) {
1991*4882a593Smuzhiyun 		rx++;
1992*4882a593Smuzhiyun 		tx++;
1993*4882a593Smuzhiyun 	}
1994*4882a593Smuzhiyun 
1995*4882a593Smuzhiyun 	rc = netif_set_real_num_tx_queues(bp->dev, tx);
1996*4882a593Smuzhiyun 	if (rc) {
1997*4882a593Smuzhiyun 		BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1998*4882a593Smuzhiyun 		return rc;
1999*4882a593Smuzhiyun 	}
2000*4882a593Smuzhiyun 	rc = netif_set_real_num_rx_queues(bp->dev, rx);
2001*4882a593Smuzhiyun 	if (rc) {
2002*4882a593Smuzhiyun 		BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
2003*4882a593Smuzhiyun 		return rc;
2004*4882a593Smuzhiyun 	}
2005*4882a593Smuzhiyun 
2006*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
2007*4882a593Smuzhiyun 			  tx, rx);
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun 	return rc;
2010*4882a593Smuzhiyun }
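
/* Editor's note - worked example with hypothetical numbers: with 8 ETH
 * queues, max_cos == 3 and an FCoE L2 queue present, the function above
 * registers tx = 8 * 3 + 1 = 25 real Tx queues and rx = 8 + 1 = 9 real Rx
 * queues with the network stack.
 */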
2011*4882a593Smuzhiyun 
2012*4882a593Smuzhiyun static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2013*4882a593Smuzhiyun {
2014*4882a593Smuzhiyun 	int i;
2015*4882a593Smuzhiyun 
2016*4882a593Smuzhiyun 	for_each_queue(bp, i) {
2017*4882a593Smuzhiyun 		struct bnx2x_fastpath *fp = &bp->fp[i];
2018*4882a593Smuzhiyun 		u32 mtu;
2019*4882a593Smuzhiyun 
2020*4882a593Smuzhiyun 		/* Always use a mini-jumbo MTU for the FCoE L2 ring */
2021*4882a593Smuzhiyun 		if (IS_FCOE_IDX(i))
2022*4882a593Smuzhiyun 			/*
2023*4882a593Smuzhiyun 			 * Although no IP frames are expected to arrive on
2024*4882a593Smuzhiyun 			 * this ring, we still want to add an
2025*4882a593Smuzhiyun 			 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2026*4882a593Smuzhiyun 			 * overrun attack.
2027*4882a593Smuzhiyun 			 */
2028*4882a593Smuzhiyun 			mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2029*4882a593Smuzhiyun 		else
2030*4882a593Smuzhiyun 			mtu = bp->dev->mtu;
2031*4882a593Smuzhiyun 		fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2032*4882a593Smuzhiyun 				  IP_HEADER_ALIGNMENT_PADDING +
2033*4882a593Smuzhiyun 				  ETH_OVERHEAD +
2034*4882a593Smuzhiyun 				  mtu +
2035*4882a593Smuzhiyun 				  BNX2X_FW_RX_ALIGN_END;
2036*4882a593Smuzhiyun 		fp->rx_buf_size = SKB_DATA_ALIGN(fp->rx_buf_size);
2037*4882a593Smuzhiyun 		/* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2038*4882a593Smuzhiyun 		if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2039*4882a593Smuzhiyun 			fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2040*4882a593Smuzhiyun 		else
2041*4882a593Smuzhiyun 			fp->rx_frag_size = 0;
2042*4882a593Smuzhiyun 	}
2043*4882a593Smuzhiyun }
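
/* Editor's note - illustrative summary of the sizing above: for a given MTU
 * the Rx buffer is sized as alignment start + IP header padding + Ethernet
 * overhead + MTU + alignment end, rounded up by SKB_DATA_ALIGN(). If that
 * size plus NET_SKB_PAD still fits within one page, rx_frag_size is set so
 * buffers can come from the page-fragment allocator; otherwise rx_frag_size
 * stays 0 and the driver falls back to its non-fragment allocation path.
 */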
2044*4882a593Smuzhiyun 
2045*4882a593Smuzhiyun static int bnx2x_init_rss(struct bnx2x *bp)
2046*4882a593Smuzhiyun {
2047*4882a593Smuzhiyun 	int i;
2048*4882a593Smuzhiyun 	u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun 	/* Prepare the initial contents for the indirection table if RSS is
2051*4882a593Smuzhiyun 	 * enabled
2052*4882a593Smuzhiyun 	 */
2053*4882a593Smuzhiyun 	for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2054*4882a593Smuzhiyun 		bp->rss_conf_obj.ind_table[i] =
2055*4882a593Smuzhiyun 			bp->fp->cl_id +
2056*4882a593Smuzhiyun 			ethtool_rxfh_indir_default(i, num_eth_queues);
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun 	/*
2059*4882a593Smuzhiyun 	 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2060*4882a593Smuzhiyun 	 * per-port, so if explicit configuration is needed, do it only
2061*4882a593Smuzhiyun 	 * for a PMF.
2062*4882a593Smuzhiyun 	 *
2063*4882a593Smuzhiyun 	 * For 57712 and newer on the other hand it's a per-function
2064*4882a593Smuzhiyun 	 * configuration.
2065*4882a593Smuzhiyun 	 */
2066*4882a593Smuzhiyun 	return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2067*4882a593Smuzhiyun }
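
/* Editor's note - illustrative sketch, not compiled. The default indirection
 * table fill above is assumed to behave like a simple round-robin
 * (ethtool_rxfh_indir_default(i, n) == i % n) offset by the first client id;
 * the hypothetical helper below spells that out.
 */
#if 0
static void example_fill_ind_table(u8 *table, size_t size, u8 base_cl_id,
				   u8 num_queues)
{
	size_t i;

	for (i = 0; i < size; i++)
		table[i] = base_cl_id + (i % num_queues);
}
#endif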
2068*4882a593Smuzhiyun 
2069*4882a593Smuzhiyun int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2070*4882a593Smuzhiyun 	      bool config_hash, bool enable)
2071*4882a593Smuzhiyun {
2072*4882a593Smuzhiyun 	struct bnx2x_config_rss_params params = {NULL};
2073*4882a593Smuzhiyun 
2074*4882a593Smuzhiyun 	/* Although RSS is meaningless when there is a single HW queue, we
2075*4882a593Smuzhiyun 	 * still need it enabled in order to have HW Rx hash generated.
2076*4882a593Smuzhiyun 	 *
2077*4882a593Smuzhiyun 	 * if (!is_eth_multi(bp))
2078*4882a593Smuzhiyun 	 *      bp->multi_mode = ETH_RSS_MODE_DISABLED;
2079*4882a593Smuzhiyun 	 */
2080*4882a593Smuzhiyun 
2081*4882a593Smuzhiyun 	params.rss_obj = rss_obj;
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2084*4882a593Smuzhiyun 
2085*4882a593Smuzhiyun 	if (enable) {
2086*4882a593Smuzhiyun 		__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2087*4882a593Smuzhiyun 
2088*4882a593Smuzhiyun 		/* RSS configuration */
2089*4882a593Smuzhiyun 		__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2090*4882a593Smuzhiyun 		__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2091*4882a593Smuzhiyun 		__set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2092*4882a593Smuzhiyun 		__set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2093*4882a593Smuzhiyun 		if (rss_obj->udp_rss_v4)
2094*4882a593Smuzhiyun 			__set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2095*4882a593Smuzhiyun 		if (rss_obj->udp_rss_v6)
2096*4882a593Smuzhiyun 			__set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 		if (!CHIP_IS_E1x(bp)) {
2099*4882a593Smuzhiyun 			/* valid only for TUNN_MODE_VXLAN tunnel mode */
2100*4882a593Smuzhiyun 			__set_bit(BNX2X_RSS_IPV4_VXLAN, &params.rss_flags);
2101*4882a593Smuzhiyun 			__set_bit(BNX2X_RSS_IPV6_VXLAN, &params.rss_flags);
2102*4882a593Smuzhiyun 
2103*4882a593Smuzhiyun 			/* valid only for TUNN_MODE_GRE tunnel mode */
2104*4882a593Smuzhiyun 			__set_bit(BNX2X_RSS_TUNN_INNER_HDRS, &params.rss_flags);
2105*4882a593Smuzhiyun 		}
2106*4882a593Smuzhiyun 	} else {
2107*4882a593Smuzhiyun 		__set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2108*4882a593Smuzhiyun 	}
2109*4882a593Smuzhiyun 
2110*4882a593Smuzhiyun 	/* Hash bits */
2111*4882a593Smuzhiyun 	params.rss_result_mask = MULTI_MASK;
2112*4882a593Smuzhiyun 
2113*4882a593Smuzhiyun 	memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun 	if (config_hash) {
2116*4882a593Smuzhiyun 		/* RSS keys */
2117*4882a593Smuzhiyun 		netdev_rss_key_fill(params.rss_key, T_ETH_RSS_KEY * 4);
2118*4882a593Smuzhiyun 		__set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2119*4882a593Smuzhiyun 	}
2120*4882a593Smuzhiyun 
2121*4882a593Smuzhiyun 	if (IS_PF(bp))
2122*4882a593Smuzhiyun 		return bnx2x_config_rss(bp, &params);
2123*4882a593Smuzhiyun 	else
2124*4882a593Smuzhiyun 		return bnx2x_vfpf_config_rss(bp, &params);
2125*4882a593Smuzhiyun }
2126*4882a593Smuzhiyun 
2127*4882a593Smuzhiyun static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2128*4882a593Smuzhiyun {
2129*4882a593Smuzhiyun 	struct bnx2x_func_state_params func_params = {NULL};
2130*4882a593Smuzhiyun 
2131*4882a593Smuzhiyun 	/* Prepare parameters for function state transitions */
2132*4882a593Smuzhiyun 	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2133*4882a593Smuzhiyun 
2134*4882a593Smuzhiyun 	func_params.f_obj = &bp->func_obj;
2135*4882a593Smuzhiyun 	func_params.cmd = BNX2X_F_CMD_HW_INIT;
2136*4882a593Smuzhiyun 
2137*4882a593Smuzhiyun 	func_params.params.hw_init.load_phase = load_code;
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 	return bnx2x_func_state_change(bp, &func_params);
2140*4882a593Smuzhiyun }
2141*4882a593Smuzhiyun 
2142*4882a593Smuzhiyun /*
2143*4882a593Smuzhiyun  * Cleans the objects that have internal lists without sending
2144*4882a593Smuzhiyun  * ramrods. Should be run with interrupts disabled.
2145*4882a593Smuzhiyun  */
2146*4882a593Smuzhiyun void bnx2x_squeeze_objects(struct bnx2x *bp)
2147*4882a593Smuzhiyun {
2148*4882a593Smuzhiyun 	int rc;
2149*4882a593Smuzhiyun 	unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2150*4882a593Smuzhiyun 	struct bnx2x_mcast_ramrod_params rparam = {NULL};
2151*4882a593Smuzhiyun 	struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2152*4882a593Smuzhiyun 
2153*4882a593Smuzhiyun 	/***************** Cleanup MACs' object first *************************/
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 	/* Wait for completion of the requested commands */
2156*4882a593Smuzhiyun 	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2157*4882a593Smuzhiyun 	/* Perform a dry cleanup */
2158*4882a593Smuzhiyun 	__set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2159*4882a593Smuzhiyun 
2160*4882a593Smuzhiyun 	/* Clean ETH primary MAC */
2161*4882a593Smuzhiyun 	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2162*4882a593Smuzhiyun 	rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2163*4882a593Smuzhiyun 				 &ramrod_flags);
2164*4882a593Smuzhiyun 	if (rc != 0)
2165*4882a593Smuzhiyun 		BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 	/* Cleanup UC list */
2168*4882a593Smuzhiyun 	vlan_mac_flags = 0;
2169*4882a593Smuzhiyun 	__set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2170*4882a593Smuzhiyun 	rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2171*4882a593Smuzhiyun 				 &ramrod_flags);
2172*4882a593Smuzhiyun 	if (rc != 0)
2173*4882a593Smuzhiyun 		BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2174*4882a593Smuzhiyun 
2175*4882a593Smuzhiyun 	/***************** Now clean mcast object *****************************/
2176*4882a593Smuzhiyun 	rparam.mcast_obj = &bp->mcast_obj;
2177*4882a593Smuzhiyun 	__set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2178*4882a593Smuzhiyun 
2179*4882a593Smuzhiyun 	/* Add a DEL command... - Since we're doing a driver cleanup only,
2180*4882a593Smuzhiyun 	 * we take a lock surrounding both the initial send and the CONTs,
2181*4882a593Smuzhiyun 	 * as we don't want a true completion to disrupt us in the middle.
2182*4882a593Smuzhiyun 	 */
2183*4882a593Smuzhiyun 	netif_addr_lock_bh(bp->dev);
2184*4882a593Smuzhiyun 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2185*4882a593Smuzhiyun 	if (rc < 0)
2186*4882a593Smuzhiyun 		BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2187*4882a593Smuzhiyun 			  rc);
2188*4882a593Smuzhiyun 
2189*4882a593Smuzhiyun 	/* ...and wait until all pending commands are cleared */
2190*4882a593Smuzhiyun 	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2191*4882a593Smuzhiyun 	while (rc != 0) {
2192*4882a593Smuzhiyun 		if (rc < 0) {
2193*4882a593Smuzhiyun 			BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2194*4882a593Smuzhiyun 				  rc);
2195*4882a593Smuzhiyun 			netif_addr_unlock_bh(bp->dev);
2196*4882a593Smuzhiyun 			return;
2197*4882a593Smuzhiyun 		}
2198*4882a593Smuzhiyun 
2199*4882a593Smuzhiyun 		rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2200*4882a593Smuzhiyun 	}
2201*4882a593Smuzhiyun 	netif_addr_unlock_bh(bp->dev);
2202*4882a593Smuzhiyun }
2203*4882a593Smuzhiyun 
2204*4882a593Smuzhiyun #ifndef BNX2X_STOP_ON_ERROR
2205*4882a593Smuzhiyun #define LOAD_ERROR_EXIT(bp, label) \
2206*4882a593Smuzhiyun 	do { \
2207*4882a593Smuzhiyun 		(bp)->state = BNX2X_STATE_ERROR; \
2208*4882a593Smuzhiyun 		goto label; \
2209*4882a593Smuzhiyun 	} while (0)
2210*4882a593Smuzhiyun 
2211*4882a593Smuzhiyun #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2212*4882a593Smuzhiyun 	do { \
2213*4882a593Smuzhiyun 		bp->cnic_loaded = false; \
2214*4882a593Smuzhiyun 		goto label; \
2215*4882a593Smuzhiyun 	} while (0)
2216*4882a593Smuzhiyun #else /*BNX2X_STOP_ON_ERROR*/
2217*4882a593Smuzhiyun #define LOAD_ERROR_EXIT(bp, label) \
2218*4882a593Smuzhiyun 	do { \
2219*4882a593Smuzhiyun 		(bp)->state = BNX2X_STATE_ERROR; \
2220*4882a593Smuzhiyun 		(bp)->panic = 1; \
2221*4882a593Smuzhiyun 		return -EBUSY; \
2222*4882a593Smuzhiyun 	} while (0)
2223*4882a593Smuzhiyun #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2224*4882a593Smuzhiyun 	do { \
2225*4882a593Smuzhiyun 		bp->cnic_loaded = false; \
2226*4882a593Smuzhiyun 		(bp)->panic = 1; \
2227*4882a593Smuzhiyun 		return -EBUSY; \
2228*4882a593Smuzhiyun 	} while (0)
2229*4882a593Smuzhiyun #endif /*BNX2X_STOP_ON_ERROR*/
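
/* Editor's note - illustrative usage sketch, not compiled. A hypothetical
 * caller of the macros above: in a BNX2X_STOP_ON_ERROR build they set the
 * panic flag and return -EBUSY immediately, otherwise they record the error
 * state and jump to the caller-supplied cleanup label.
 */
#if 0
static int example_load_step(struct bnx2x *bp)
{
	if (bnx2x_alloc_fp_mem(bp))
		LOAD_ERROR_EXIT(bp, load_error);

	return 0;

load_error:
	bnx2x_free_skbs(bp);
	return -EBUSY;
}
#endif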
2230*4882a593Smuzhiyun 
2231*4882a593Smuzhiyun static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2232*4882a593Smuzhiyun {
2233*4882a593Smuzhiyun 	BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2234*4882a593Smuzhiyun 		       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2235*4882a593Smuzhiyun 	return;
2236*4882a593Smuzhiyun }
2237*4882a593Smuzhiyun 
bnx2x_alloc_fw_stats_mem(struct bnx2x * bp)2238*4882a593Smuzhiyun static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2239*4882a593Smuzhiyun {
2240*4882a593Smuzhiyun 	int num_groups, vf_headroom = 0;
2241*4882a593Smuzhiyun 	int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2242*4882a593Smuzhiyun 
2243*4882a593Smuzhiyun 	/* number of queues for statistics is number of eth queues + FCoE */
2244*4882a593Smuzhiyun 	u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun 	/* Total number of FW statistics requests =
2247*4882a593Smuzhiyun 	 * 1 for port stats + 1 for PF stats + 1 for FCoE proper (if applicable)
2248*4882a593Smuzhiyun 	 * + number of queues (which already includes one entry for the FCoE
2249*4882a593Smuzhiyun 	 * L2 queue when FCoE is enabled)
2250*4882a593Smuzhiyun 	 */
2251*4882a593Smuzhiyun 	bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 	/* vf stats appear in the request list, but their data is allocated by
2254*4882a593Smuzhiyun 	 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2255*4882a593Smuzhiyun 	 * it is used to determine where to place the vf stats queries in the
2256*4882a593Smuzhiyun 	 * request struct
2257*4882a593Smuzhiyun 	 */
2258*4882a593Smuzhiyun 	if (IS_SRIOV(bp))
2259*4882a593Smuzhiyun 		vf_headroom = bnx2x_vf_headroom(bp);
2260*4882a593Smuzhiyun 
2261*4882a593Smuzhiyun 	/* Request is built from stats_query_header and an array of
2262*4882a593Smuzhiyun 	 * stats_query_cmd_group entries, each of which contains
2263*4882a593Smuzhiyun 	 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2264*4882a593Smuzhiyun 	 * configured in the stats_query_header.
2265*4882a593Smuzhiyun 	 */
2266*4882a593Smuzhiyun 	num_groups =
2267*4882a593Smuzhiyun 		(((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2268*4882a593Smuzhiyun 		 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2269*4882a593Smuzhiyun 		 1 : 0));
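	/* e.g. with 16 request entries (fw_stats_num + vf_headroom == 16) and,
	 * assuming STATS_QUERY_CMD_COUNT is 16, this yields num_groups == 1;
	 * 17 entries would round up to 2 groups.
	 */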
2270*4882a593Smuzhiyun 
2271*4882a593Smuzhiyun 	DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2272*4882a593Smuzhiyun 	   bp->fw_stats_num, vf_headroom, num_groups);
2273*4882a593Smuzhiyun 	bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2274*4882a593Smuzhiyun 		num_groups * sizeof(struct stats_query_cmd_group);
2275*4882a593Smuzhiyun 
2276*4882a593Smuzhiyun 	/* Data for statistics requests + stats_counter
2277*4882a593Smuzhiyun 	 * stats_counter holds per-STORM counters that are incremented
2278*4882a593Smuzhiyun 	 * when STORM has finished with the current request.
2279*4882a593Smuzhiyun 	 * Memory for FCoE offloaded statistics is counted anyway,
2280*4882a593Smuzhiyun 	 * even if it will not be sent.
2281*4882a593Smuzhiyun 	 * VF stats are not accounted for here as the data of VF stats is stored
2282*4882a593Smuzhiyun 	 * in memory allocated by the VF, not here.
2283*4882a593Smuzhiyun 	 */
2284*4882a593Smuzhiyun 	bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2285*4882a593Smuzhiyun 		sizeof(struct per_pf_stats) +
2286*4882a593Smuzhiyun 		sizeof(struct fcoe_statistics_params) +
2287*4882a593Smuzhiyun 		sizeof(struct per_queue_stats) * num_queue_stats +
2288*4882a593Smuzhiyun 		sizeof(struct stats_counter);
2289*4882a593Smuzhiyun 
2290*4882a593Smuzhiyun 	bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2291*4882a593Smuzhiyun 				       bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2292*4882a593Smuzhiyun 	if (!bp->fw_stats)
2293*4882a593Smuzhiyun 		goto alloc_mem_err;
2294*4882a593Smuzhiyun 
2295*4882a593Smuzhiyun 	/* Set shortcuts */
2296*4882a593Smuzhiyun 	bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2297*4882a593Smuzhiyun 	bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2298*4882a593Smuzhiyun 	bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2299*4882a593Smuzhiyun 		((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2300*4882a593Smuzhiyun 	bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2301*4882a593Smuzhiyun 		bp->fw_stats_req_sz;
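	/* The single DMA-coherent buffer is laid out as
	 *   [ request (fw_stats_req_sz bytes) | data (fw_stats_data_sz bytes) ]
	 * so the data shortcuts above simply point fw_stats_req_sz bytes past
	 * the start of the allocation.
	 */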
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun 	DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2304*4882a593Smuzhiyun 	   U64_HI(bp->fw_stats_req_mapping),
2305*4882a593Smuzhiyun 	   U64_LO(bp->fw_stats_req_mapping));
2306*4882a593Smuzhiyun 	DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2307*4882a593Smuzhiyun 	   U64_HI(bp->fw_stats_data_mapping),
2308*4882a593Smuzhiyun 	   U64_LO(bp->fw_stats_data_mapping));
2309*4882a593Smuzhiyun 	return 0;
2310*4882a593Smuzhiyun 
2311*4882a593Smuzhiyun alloc_mem_err:
2312*4882a593Smuzhiyun 	bnx2x_free_fw_stats_mem(bp);
2313*4882a593Smuzhiyun 	BNX2X_ERR("Can't allocate FW stats memory\n");
2314*4882a593Smuzhiyun 	return -ENOMEM;
2315*4882a593Smuzhiyun }
2316*4882a593Smuzhiyun 
2317*4882a593Smuzhiyun /* send load request to mcp and analyze response */
bnx2x_nic_load_request(struct bnx2x * bp,u32 * load_code)2318*4882a593Smuzhiyun static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2319*4882a593Smuzhiyun {
2320*4882a593Smuzhiyun 	u32 param;
2321*4882a593Smuzhiyun 
2322*4882a593Smuzhiyun 	/* init fw_seq */
2323*4882a593Smuzhiyun 	bp->fw_seq =
2324*4882a593Smuzhiyun 		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2325*4882a593Smuzhiyun 		 DRV_MSG_SEQ_NUMBER_MASK);
2326*4882a593Smuzhiyun 	BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2327*4882a593Smuzhiyun 
2328*4882a593Smuzhiyun 	/* Get current FW pulse sequence */
2329*4882a593Smuzhiyun 	bp->fw_drv_pulse_wr_seq =
2330*4882a593Smuzhiyun 		(SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2331*4882a593Smuzhiyun 		 DRV_PULSE_SEQ_MASK);
2332*4882a593Smuzhiyun 	BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2333*4882a593Smuzhiyun 
2334*4882a593Smuzhiyun 	param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2335*4882a593Smuzhiyun 
2336*4882a593Smuzhiyun 	if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2337*4882a593Smuzhiyun 		param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2338*4882a593Smuzhiyun 
2339*4882a593Smuzhiyun 	/* load request */
2340*4882a593Smuzhiyun 	(*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2341*4882a593Smuzhiyun 
2342*4882a593Smuzhiyun 	/* if mcp fails to respond we must abort */
2343*4882a593Smuzhiyun 	if (!(*load_code)) {
2344*4882a593Smuzhiyun 		BNX2X_ERR("MCP response failure, aborting\n");
2345*4882a593Smuzhiyun 		return -EBUSY;
2346*4882a593Smuzhiyun 	}
2347*4882a593Smuzhiyun 
2348*4882a593Smuzhiyun 	/* If mcp refused (e.g. other port is in diagnostic mode) we
2349*4882a593Smuzhiyun 	 * must abort
2350*4882a593Smuzhiyun 	 */
2351*4882a593Smuzhiyun 	if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2352*4882a593Smuzhiyun 		BNX2X_ERR("MCP refused load request, aborting\n");
2353*4882a593Smuzhiyun 		return -EBUSY;
2354*4882a593Smuzhiyun 	}
2355*4882a593Smuzhiyun 	return 0;
2356*4882a593Smuzhiyun }
2357*4882a593Smuzhiyun 
2358*4882a593Smuzhiyun /* check whether another PF has already loaded FW to the chip. In
2359*4882a593Smuzhiyun  * virtualized environments a PF from another VM may have already
2360*4882a593Smuzhiyun  * initialized the device, including loading the FW
2361*4882a593Smuzhiyun  */
bnx2x_compare_fw_ver(struct bnx2x * bp,u32 load_code,bool print_err)2362*4882a593Smuzhiyun int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2363*4882a593Smuzhiyun {
2364*4882a593Smuzhiyun 	/* is another pf loaded on this engine? */
2365*4882a593Smuzhiyun 	if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2366*4882a593Smuzhiyun 	    load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2367*4882a593Smuzhiyun 		u8 loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng;
2368*4882a593Smuzhiyun 		u32 loaded_fw;
2369*4882a593Smuzhiyun 
2370*4882a593Smuzhiyun 		/* read loaded FW from chip */
2371*4882a593Smuzhiyun 		loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2372*4882a593Smuzhiyun 
2373*4882a593Smuzhiyun 		loaded_fw_major = loaded_fw & 0xff;
2374*4882a593Smuzhiyun 		loaded_fw_minor = (loaded_fw >> 8) & 0xff;
2375*4882a593Smuzhiyun 		loaded_fw_rev = (loaded_fw >> 16) & 0xff;
2376*4882a593Smuzhiyun 		loaded_fw_eng = (loaded_fw >> 24) & 0xff;
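		/* The version word packs major in bits 0-7, minor in 8-15,
		 * revision in 16-23 and engineering in 24-31; e.g. a value of
		 * 0x000d0807 decodes to 7.8.13, engineering 0.
		 */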
2377*4882a593Smuzhiyun 
2378*4882a593Smuzhiyun 		DP(BNX2X_MSG_SP, "loaded fw 0x%x major 0x%x minor 0x%x rev 0x%x eng 0x%x\n",
2379*4882a593Smuzhiyun 		   loaded_fw, loaded_fw_major, loaded_fw_minor, loaded_fw_rev, loaded_fw_eng);
2380*4882a593Smuzhiyun 
2381*4882a593Smuzhiyun 		/* abort nic load if version mismatch */
2382*4882a593Smuzhiyun 		if (loaded_fw_major != BCM_5710_FW_MAJOR_VERSION ||
2383*4882a593Smuzhiyun 		    loaded_fw_minor != BCM_5710_FW_MINOR_VERSION ||
2384*4882a593Smuzhiyun 		    loaded_fw_eng != BCM_5710_FW_ENGINEERING_VERSION ||
2385*4882a593Smuzhiyun 		    loaded_fw_rev < BCM_5710_FW_REVISION_VERSION_V15) {
2386*4882a593Smuzhiyun 			if (print_err)
2387*4882a593Smuzhiyun 				BNX2X_ERR("loaded FW incompatible. Aborting\n");
2388*4882a593Smuzhiyun 			else
2389*4882a593Smuzhiyun 				BNX2X_DEV_INFO("loaded FW incompatible, possibly due to MF UNDI\n");
2390*4882a593Smuzhiyun 
2391*4882a593Smuzhiyun 			return -EBUSY;
2392*4882a593Smuzhiyun 		}
2393*4882a593Smuzhiyun 	}
2394*4882a593Smuzhiyun 	return 0;
2395*4882a593Smuzhiyun }
2396*4882a593Smuzhiyun 
2397*4882a593Smuzhiyun /* returns the "mcp load_code" according to global load_count array */
bnx2x_nic_load_no_mcp(struct bnx2x * bp,int port)2398*4882a593Smuzhiyun static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2399*4882a593Smuzhiyun {
2400*4882a593Smuzhiyun 	int path = BP_PATH(bp);
2401*4882a593Smuzhiyun 
2402*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
2403*4882a593Smuzhiyun 	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2404*4882a593Smuzhiyun 	   bnx2x_load_count[path][2]);
2405*4882a593Smuzhiyun 	bnx2x_load_count[path][0]++;
2406*4882a593Smuzhiyun 	bnx2x_load_count[path][1 + port]++;
2407*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
2408*4882a593Smuzhiyun 	   path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2409*4882a593Smuzhiyun 	   bnx2x_load_count[path][2]);
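	/* load_count[path][0] counts all loaded functions on this path while
	 * [1 + port] counts those on each port, so the first function on the
	 * path gets COMMON, the first on a port gets PORT and everyone else
	 * gets FUNCTION.
	 */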
2410*4882a593Smuzhiyun 	if (bnx2x_load_count[path][0] == 1)
2411*4882a593Smuzhiyun 		return FW_MSG_CODE_DRV_LOAD_COMMON;
2412*4882a593Smuzhiyun 	else if (bnx2x_load_count[path][1 + port] == 1)
2413*4882a593Smuzhiyun 		return FW_MSG_CODE_DRV_LOAD_PORT;
2414*4882a593Smuzhiyun 	else
2415*4882a593Smuzhiyun 		return FW_MSG_CODE_DRV_LOAD_FUNCTION;
2416*4882a593Smuzhiyun }
2417*4882a593Smuzhiyun 
2418*4882a593Smuzhiyun /* mark PMF if applicable */
bnx2x_nic_load_pmf(struct bnx2x * bp,u32 load_code)2419*4882a593Smuzhiyun static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2420*4882a593Smuzhiyun {
2421*4882a593Smuzhiyun 	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2422*4882a593Smuzhiyun 	    (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2423*4882a593Smuzhiyun 	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2424*4882a593Smuzhiyun 		bp->port.pmf = 1;
2425*4882a593Smuzhiyun 		/* We need the barrier to ensure the ordering between the
2426*4882a593Smuzhiyun 		 * writing to bp->port.pmf here and reading it from the
2427*4882a593Smuzhiyun 		 * bnx2x_periodic_task().
2428*4882a593Smuzhiyun 		 */
2429*4882a593Smuzhiyun 		smp_mb();
2430*4882a593Smuzhiyun 	} else {
2431*4882a593Smuzhiyun 		bp->port.pmf = 0;
2432*4882a593Smuzhiyun 	}
2433*4882a593Smuzhiyun 
2434*4882a593Smuzhiyun 	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2435*4882a593Smuzhiyun }
2436*4882a593Smuzhiyun 
bnx2x_nic_load_afex_dcc(struct bnx2x * bp,int load_code)2437*4882a593Smuzhiyun static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2438*4882a593Smuzhiyun {
2439*4882a593Smuzhiyun 	if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2440*4882a593Smuzhiyun 	     (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2441*4882a593Smuzhiyun 	    (bp->common.shmem2_base)) {
2442*4882a593Smuzhiyun 		if (SHMEM2_HAS(bp, dcc_support))
2443*4882a593Smuzhiyun 			SHMEM2_WR(bp, dcc_support,
2444*4882a593Smuzhiyun 				  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2445*4882a593Smuzhiyun 				   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2446*4882a593Smuzhiyun 		if (SHMEM2_HAS(bp, afex_driver_support))
2447*4882a593Smuzhiyun 			SHMEM2_WR(bp, afex_driver_support,
2448*4882a593Smuzhiyun 				  SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2449*4882a593Smuzhiyun 	}
2450*4882a593Smuzhiyun 
2451*4882a593Smuzhiyun 	/* Set AFEX default VLAN tag to an invalid value */
2452*4882a593Smuzhiyun 	bp->afex_def_vlan_tag = -1;
2453*4882a593Smuzhiyun }
2454*4882a593Smuzhiyun 
2455*4882a593Smuzhiyun /**
2456*4882a593Smuzhiyun  * bnx2x_bz_fp - zero content of the fastpath structure.
2457*4882a593Smuzhiyun  *
2458*4882a593Smuzhiyun  * @bp:		driver handle
2459*4882a593Smuzhiyun  * @index:	fastpath index to be zeroed
2460*4882a593Smuzhiyun  *
2461*4882a593Smuzhiyun  * Makes sure the contents of bp->fp[index].napi and its tpa_info are
2462*4882a593Smuzhiyun  * kept intact.
2463*4882a593Smuzhiyun  */
bnx2x_bz_fp(struct bnx2x * bp,int index)2464*4882a593Smuzhiyun static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2465*4882a593Smuzhiyun {
2466*4882a593Smuzhiyun 	struct bnx2x_fastpath *fp = &bp->fp[index];
2467*4882a593Smuzhiyun 	int cos;
2468*4882a593Smuzhiyun 	struct napi_struct orig_napi = fp->napi;
2469*4882a593Smuzhiyun 	struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2470*4882a593Smuzhiyun 
2471*4882a593Smuzhiyun 	/* bzero bnx2x_fastpath contents */
2472*4882a593Smuzhiyun 	if (fp->tpa_info)
2473*4882a593Smuzhiyun 		memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2474*4882a593Smuzhiyun 		       sizeof(struct bnx2x_agg_info));
2475*4882a593Smuzhiyun 	memset(fp, 0, sizeof(*fp));
2476*4882a593Smuzhiyun 
2477*4882a593Smuzhiyun 	/* Restore the NAPI object as it has been already initialized */
2478*4882a593Smuzhiyun 	fp->napi = orig_napi;
2479*4882a593Smuzhiyun 	fp->tpa_info = orig_tpa_info;
2480*4882a593Smuzhiyun 	fp->bp = bp;
2481*4882a593Smuzhiyun 	fp->index = index;
2482*4882a593Smuzhiyun 	if (IS_ETH_FP(fp))
2483*4882a593Smuzhiyun 		fp->max_cos = bp->max_cos;
2484*4882a593Smuzhiyun 	else
2485*4882a593Smuzhiyun 		/* Special queues support only one CoS */
2486*4882a593Smuzhiyun 		fp->max_cos = 1;
2487*4882a593Smuzhiyun 
2488*4882a593Smuzhiyun 	/* Init txdata pointers */
2489*4882a593Smuzhiyun 	if (IS_FCOE_FP(fp))
2490*4882a593Smuzhiyun 		fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2491*4882a593Smuzhiyun 	if (IS_ETH_FP(fp))
2492*4882a593Smuzhiyun 		for_each_cos_in_tx_queue(fp, cos)
2493*4882a593Smuzhiyun 			fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2494*4882a593Smuzhiyun 				BNX2X_NUM_ETH_QUEUES(bp) + index];
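	/* e.g. with 4 ethernet queues and 3 CoS values, queue 1 maps to
	 * bnx2x_txq[1], bnx2x_txq[5] and bnx2x_txq[9] for CoS 0, 1 and 2.
	 */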
2495*4882a593Smuzhiyun 
2496*4882a593Smuzhiyun 	/* set the tpa flag for each queue. The tpa flag determines the queue
2497*4882a593Smuzhiyun 	 * minimal size so it must be set prior to queue memory allocation
2498*4882a593Smuzhiyun 	 */
2499*4882a593Smuzhiyun 	if (bp->dev->features & NETIF_F_LRO)
2500*4882a593Smuzhiyun 		fp->mode = TPA_MODE_LRO;
2501*4882a593Smuzhiyun 	else if (bp->dev->features & NETIF_F_GRO_HW)
2502*4882a593Smuzhiyun 		fp->mode = TPA_MODE_GRO;
2503*4882a593Smuzhiyun 	else
2504*4882a593Smuzhiyun 		fp->mode = TPA_MODE_DISABLED;
2505*4882a593Smuzhiyun 
2506*4882a593Smuzhiyun 	/* We don't want TPA if it's disabled in bp
2507*4882a593Smuzhiyun 	 * or if this is an FCoE L2 ring.
2508*4882a593Smuzhiyun 	 */
2509*4882a593Smuzhiyun 	if (bp->disable_tpa || IS_FCOE_FP(fp))
2510*4882a593Smuzhiyun 		fp->mode = TPA_MODE_DISABLED;
2511*4882a593Smuzhiyun }
2512*4882a593Smuzhiyun 
bnx2x_set_os_driver_state(struct bnx2x * bp,u32 state)2513*4882a593Smuzhiyun void bnx2x_set_os_driver_state(struct bnx2x *bp, u32 state)
2514*4882a593Smuzhiyun {
2515*4882a593Smuzhiyun 	u32 cur;
2516*4882a593Smuzhiyun 
2517*4882a593Smuzhiyun 	if (!IS_MF_BD(bp) || !SHMEM2_HAS(bp, os_driver_state) || IS_VF(bp))
2518*4882a593Smuzhiyun 		return;
2519*4882a593Smuzhiyun 
2520*4882a593Smuzhiyun 	cur = SHMEM2_RD(bp, os_driver_state[BP_FW_MB_IDX(bp)]);
2521*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP, "Driver state %08x-->%08x\n",
2522*4882a593Smuzhiyun 	   cur, state);
2523*4882a593Smuzhiyun 
2524*4882a593Smuzhiyun 	SHMEM2_WR(bp, os_driver_state[BP_FW_MB_IDX(bp)], state);
2525*4882a593Smuzhiyun }
2526*4882a593Smuzhiyun 
bnx2x_load_cnic(struct bnx2x * bp)2527*4882a593Smuzhiyun int bnx2x_load_cnic(struct bnx2x *bp)
2528*4882a593Smuzhiyun {
2529*4882a593Smuzhiyun 	int i, rc, port = BP_PORT(bp);
2530*4882a593Smuzhiyun 
2531*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2532*4882a593Smuzhiyun 
2533*4882a593Smuzhiyun 	mutex_init(&bp->cnic_mutex);
2534*4882a593Smuzhiyun 
2535*4882a593Smuzhiyun 	if (IS_PF(bp)) {
2536*4882a593Smuzhiyun 		rc = bnx2x_alloc_mem_cnic(bp);
2537*4882a593Smuzhiyun 		if (rc) {
2538*4882a593Smuzhiyun 			BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2539*4882a593Smuzhiyun 			LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2540*4882a593Smuzhiyun 		}
2541*4882a593Smuzhiyun 	}
2542*4882a593Smuzhiyun 
2543*4882a593Smuzhiyun 	rc = bnx2x_alloc_fp_mem_cnic(bp);
2544*4882a593Smuzhiyun 	if (rc) {
2545*4882a593Smuzhiyun 		BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2546*4882a593Smuzhiyun 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2547*4882a593Smuzhiyun 	}
2548*4882a593Smuzhiyun 
2549*4882a593Smuzhiyun 	/* Update the number of queues with the cnic queues */
2550*4882a593Smuzhiyun 	rc = bnx2x_set_real_num_queues(bp, 1);
2551*4882a593Smuzhiyun 	if (rc) {
2552*4882a593Smuzhiyun 		BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2553*4882a593Smuzhiyun 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2554*4882a593Smuzhiyun 	}
2555*4882a593Smuzhiyun 
2556*4882a593Smuzhiyun 	/* Add all CNIC NAPI objects */
2557*4882a593Smuzhiyun 	bnx2x_add_all_napi_cnic(bp);
2558*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP, "cnic napi added\n");
2559*4882a593Smuzhiyun 	bnx2x_napi_enable_cnic(bp);
2560*4882a593Smuzhiyun 
2561*4882a593Smuzhiyun 	rc = bnx2x_init_hw_func_cnic(bp);
2562*4882a593Smuzhiyun 	if (rc)
2563*4882a593Smuzhiyun 		LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2564*4882a593Smuzhiyun 
2565*4882a593Smuzhiyun 	bnx2x_nic_init_cnic(bp);
2566*4882a593Smuzhiyun 
2567*4882a593Smuzhiyun 	if (IS_PF(bp)) {
2568*4882a593Smuzhiyun 		/* Enable Timer scan */
2569*4882a593Smuzhiyun 		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2570*4882a593Smuzhiyun 
2571*4882a593Smuzhiyun 		/* setup cnic queues */
2572*4882a593Smuzhiyun 		for_each_cnic_queue(bp, i) {
2573*4882a593Smuzhiyun 			rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2574*4882a593Smuzhiyun 			if (rc) {
2575*4882a593Smuzhiyun 				BNX2X_ERR("Queue setup failed\n");
2576*4882a593Smuzhiyun 				LOAD_ERROR_EXIT(bp, load_error_cnic2);
2577*4882a593Smuzhiyun 			}
2578*4882a593Smuzhiyun 		}
2579*4882a593Smuzhiyun 	}
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun 	/* Initialize Rx filter. */
2582*4882a593Smuzhiyun 	bnx2x_set_rx_mode_inner(bp);
2583*4882a593Smuzhiyun 
2584*4882a593Smuzhiyun 	/* re-read iscsi info */
2585*4882a593Smuzhiyun 	bnx2x_get_iscsi_info(bp);
2586*4882a593Smuzhiyun 	bnx2x_setup_cnic_irq_info(bp);
2587*4882a593Smuzhiyun 	bnx2x_setup_cnic_info(bp);
2588*4882a593Smuzhiyun 	bp->cnic_loaded = true;
2589*4882a593Smuzhiyun 	if (bp->state == BNX2X_STATE_OPEN)
2590*4882a593Smuzhiyun 		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2591*4882a593Smuzhiyun 
2592*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2593*4882a593Smuzhiyun 
2594*4882a593Smuzhiyun 	return 0;
2595*4882a593Smuzhiyun 
2596*4882a593Smuzhiyun #ifndef BNX2X_STOP_ON_ERROR
2597*4882a593Smuzhiyun load_error_cnic2:
2598*4882a593Smuzhiyun 	/* Disable Timer scan */
2599*4882a593Smuzhiyun 	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2600*4882a593Smuzhiyun 
2601*4882a593Smuzhiyun load_error_cnic1:
2602*4882a593Smuzhiyun 	bnx2x_napi_disable_cnic(bp);
2603*4882a593Smuzhiyun 	/* Update the number of queues without the cnic queues */
2604*4882a593Smuzhiyun 	if (bnx2x_set_real_num_queues(bp, 0))
2605*4882a593Smuzhiyun 		BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2606*4882a593Smuzhiyun load_error_cnic0:
2607*4882a593Smuzhiyun 	BNX2X_ERR("CNIC-related load failed\n");
2608*4882a593Smuzhiyun 	bnx2x_free_fp_mem_cnic(bp);
2609*4882a593Smuzhiyun 	bnx2x_free_mem_cnic(bp);
2610*4882a593Smuzhiyun 	return rc;
2611*4882a593Smuzhiyun #endif /* ! BNX2X_STOP_ON_ERROR */
2612*4882a593Smuzhiyun }
2613*4882a593Smuzhiyun 
2614*4882a593Smuzhiyun /* must be called with rtnl_lock */
bnx2x_nic_load(struct bnx2x * bp,int load_mode)2615*4882a593Smuzhiyun int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2616*4882a593Smuzhiyun {
2617*4882a593Smuzhiyun 	int port = BP_PORT(bp);
2618*4882a593Smuzhiyun 	int i, rc = 0, load_code = 0;
2619*4882a593Smuzhiyun 
2620*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2621*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP,
2622*4882a593Smuzhiyun 	   "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2623*4882a593Smuzhiyun 
2624*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
2625*4882a593Smuzhiyun 	if (unlikely(bp->panic)) {
2626*4882a593Smuzhiyun 		BNX2X_ERR("Can't load NIC when there is panic\n");
2627*4882a593Smuzhiyun 		return -EPERM;
2628*4882a593Smuzhiyun 	}
2629*4882a593Smuzhiyun #endif
2630*4882a593Smuzhiyun 
2631*4882a593Smuzhiyun 	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2632*4882a593Smuzhiyun 
2633*4882a593Smuzhiyun 	/* zero the structure w/o any lock, before SP handler is initialized */
2634*4882a593Smuzhiyun 	memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2635*4882a593Smuzhiyun 	__set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2636*4882a593Smuzhiyun 		&bp->last_reported_link.link_report_flags);
2637*4882a593Smuzhiyun 
2638*4882a593Smuzhiyun 	if (IS_PF(bp))
2639*4882a593Smuzhiyun 		/* must be called before memory allocation and HW init */
2640*4882a593Smuzhiyun 		bnx2x_ilt_set_info(bp);
2641*4882a593Smuzhiyun 
2642*4882a593Smuzhiyun 	/*
2643*4882a593Smuzhiyun 	 * Zero fastpath structures while preserving invariants that are set up
2644*4882a593Smuzhiyun 	 * only once (napi), as well as the fp index, max_cos and bp pointer.
2645*4882a593Smuzhiyun 	 * Also set fp->mode and txdata_ptr.
2646*4882a593Smuzhiyun 	 */
2647*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2648*4882a593Smuzhiyun 	for_each_queue(bp, i)
2649*4882a593Smuzhiyun 		bnx2x_bz_fp(bp, i);
2650*4882a593Smuzhiyun 	memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2651*4882a593Smuzhiyun 				  bp->num_cnic_queues) *
2652*4882a593Smuzhiyun 				  sizeof(struct bnx2x_fp_txdata));
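	/* The memset above clears every txdata entry in bp->bnx2x_txq: room for
	 * BNX2X_MAX_RSS_COUNT * BNX2X_MULTI_TX_COS ethernet entries plus the
	 * CNIC ones; the fp->txdata_ptr pointers set up by bnx2x_bz_fp() keep
	 * pointing into this (now zeroed) array.
	 */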
2653*4882a593Smuzhiyun 
2654*4882a593Smuzhiyun 	bp->fcoe_init = false;
2655*4882a593Smuzhiyun 
2656*4882a593Smuzhiyun 	/* Set the receive queues buffer size */
2657*4882a593Smuzhiyun 	bnx2x_set_rx_buf_size(bp);
2658*4882a593Smuzhiyun 
2659*4882a593Smuzhiyun 	if (IS_PF(bp)) {
2660*4882a593Smuzhiyun 		rc = bnx2x_alloc_mem(bp);
2661*4882a593Smuzhiyun 		if (rc) {
2662*4882a593Smuzhiyun 			BNX2X_ERR("Unable to allocate bp memory\n");
2663*4882a593Smuzhiyun 			return rc;
2664*4882a593Smuzhiyun 		}
2665*4882a593Smuzhiyun 	}
2666*4882a593Smuzhiyun 
2667*4882a593Smuzhiyun 	/* Needs to be done after alloc mem, since it is self-adjusting to the
2668*4882a593Smuzhiyun 	 * amount of memory available for RSS queues
2669*4882a593Smuzhiyun 	 */
2670*4882a593Smuzhiyun 	rc = bnx2x_alloc_fp_mem(bp);
2671*4882a593Smuzhiyun 	if (rc) {
2672*4882a593Smuzhiyun 		BNX2X_ERR("Unable to allocate memory for fps\n");
2673*4882a593Smuzhiyun 		LOAD_ERROR_EXIT(bp, load_error0);
2674*4882a593Smuzhiyun 	}
2675*4882a593Smuzhiyun 
2676*4882a593Smuzhiyun 	/* Allocate memory for FW statistics */
2677*4882a593Smuzhiyun 	rc = bnx2x_alloc_fw_stats_mem(bp);
2678*4882a593Smuzhiyun 	if (rc)
2679*4882a593Smuzhiyun 		LOAD_ERROR_EXIT(bp, load_error0);
2680*4882a593Smuzhiyun 
2681*4882a593Smuzhiyun 	/* request pf to initialize status blocks */
2682*4882a593Smuzhiyun 	if (IS_VF(bp)) {
2683*4882a593Smuzhiyun 		rc = bnx2x_vfpf_init(bp);
2684*4882a593Smuzhiyun 		if (rc)
2685*4882a593Smuzhiyun 			LOAD_ERROR_EXIT(bp, load_error0);
2686*4882a593Smuzhiyun 	}
2687*4882a593Smuzhiyun 
2688*4882a593Smuzhiyun 	/* Since bnx2x_alloc_mem() may update bp->num_queues,
2689*4882a593Smuzhiyun 	 * bnx2x_set_real_num_queues() must always come after it.
2690*4882a593Smuzhiyun 	 * At this stage CNIC queues are not counted.
2691*4882a593Smuzhiyun 	 */
2692*4882a593Smuzhiyun 	rc = bnx2x_set_real_num_queues(bp, 0);
2693*4882a593Smuzhiyun 	if (rc) {
2694*4882a593Smuzhiyun 		BNX2X_ERR("Unable to set real_num_queues\n");
2695*4882a593Smuzhiyun 		LOAD_ERROR_EXIT(bp, load_error0);
2696*4882a593Smuzhiyun 	}
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun 	/* Configure multi-CoS mappings in the kernel.
2699*4882a593Smuzhiyun 	 * This configuration may be overridden by a multi-class queue
2700*4882a593Smuzhiyun 	 * discipline or by a DCBX negotiation result.
2701*4882a593Smuzhiyun 	 */
2702*4882a593Smuzhiyun 	bnx2x_setup_tc(bp->dev, bp->max_cos);
2703*4882a593Smuzhiyun 
2704*4882a593Smuzhiyun 	/* Add all NAPI objects */
2705*4882a593Smuzhiyun 	bnx2x_add_all_napi(bp);
2706*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP, "napi added\n");
2707*4882a593Smuzhiyun 	bnx2x_napi_enable(bp);
2708*4882a593Smuzhiyun 
2709*4882a593Smuzhiyun 	if (IS_PF(bp)) {
2710*4882a593Smuzhiyun 		/* set pf load just before approaching the MCP */
2711*4882a593Smuzhiyun 		bnx2x_set_pf_load(bp);
2712*4882a593Smuzhiyun 
2713*4882a593Smuzhiyun 		/* if MCP exists, send a load request and analyze the response */
2714*4882a593Smuzhiyun 		if (!BP_NOMCP(bp)) {
2715*4882a593Smuzhiyun 			/* attempt to load pf */
2716*4882a593Smuzhiyun 			rc = bnx2x_nic_load_request(bp, &load_code);
2717*4882a593Smuzhiyun 			if (rc)
2718*4882a593Smuzhiyun 				LOAD_ERROR_EXIT(bp, load_error1);
2719*4882a593Smuzhiyun 
2720*4882a593Smuzhiyun 			/* what did mcp say? */
2721*4882a593Smuzhiyun 			rc = bnx2x_compare_fw_ver(bp, load_code, true);
2722*4882a593Smuzhiyun 			if (rc) {
2723*4882a593Smuzhiyun 				bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2724*4882a593Smuzhiyun 				LOAD_ERROR_EXIT(bp, load_error2);
2725*4882a593Smuzhiyun 			}
2726*4882a593Smuzhiyun 		} else {
2727*4882a593Smuzhiyun 			load_code = bnx2x_nic_load_no_mcp(bp, port);
2728*4882a593Smuzhiyun 		}
2729*4882a593Smuzhiyun 
2730*4882a593Smuzhiyun 		/* mark pmf if applicable */
2731*4882a593Smuzhiyun 		bnx2x_nic_load_pmf(bp, load_code);
2732*4882a593Smuzhiyun 
2733*4882a593Smuzhiyun 		/* Init Function state controlling object */
2734*4882a593Smuzhiyun 		bnx2x__init_func_obj(bp);
2735*4882a593Smuzhiyun 
2736*4882a593Smuzhiyun 		/* Initialize HW */
2737*4882a593Smuzhiyun 		rc = bnx2x_init_hw(bp, load_code);
2738*4882a593Smuzhiyun 		if (rc) {
2739*4882a593Smuzhiyun 			BNX2X_ERR("HW init failed, aborting\n");
2740*4882a593Smuzhiyun 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2741*4882a593Smuzhiyun 			LOAD_ERROR_EXIT(bp, load_error2);
2742*4882a593Smuzhiyun 		}
2743*4882a593Smuzhiyun 	}
2744*4882a593Smuzhiyun 
2745*4882a593Smuzhiyun 	bnx2x_pre_irq_nic_init(bp);
2746*4882a593Smuzhiyun 
2747*4882a593Smuzhiyun 	/* Connect to IRQs */
2748*4882a593Smuzhiyun 	rc = bnx2x_setup_irqs(bp);
2749*4882a593Smuzhiyun 	if (rc) {
2750*4882a593Smuzhiyun 		BNX2X_ERR("setup irqs failed\n");
2751*4882a593Smuzhiyun 		if (IS_PF(bp))
2752*4882a593Smuzhiyun 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2753*4882a593Smuzhiyun 		LOAD_ERROR_EXIT(bp, load_error2);
2754*4882a593Smuzhiyun 	}
2755*4882a593Smuzhiyun 
2756*4882a593Smuzhiyun 	/* Init per-function objects */
2757*4882a593Smuzhiyun 	if (IS_PF(bp)) {
2758*4882a593Smuzhiyun 		/* Setup NIC internals and enable interrupts */
2759*4882a593Smuzhiyun 		bnx2x_post_irq_nic_init(bp, load_code);
2760*4882a593Smuzhiyun 
2761*4882a593Smuzhiyun 		bnx2x_init_bp_objs(bp);
2762*4882a593Smuzhiyun 		bnx2x_iov_nic_init(bp);
2763*4882a593Smuzhiyun 
2764*4882a593Smuzhiyun 		/* Set AFEX default VLAN tag to an invalid value */
2765*4882a593Smuzhiyun 		bp->afex_def_vlan_tag = -1;
2766*4882a593Smuzhiyun 		bnx2x_nic_load_afex_dcc(bp, load_code);
2767*4882a593Smuzhiyun 		bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2768*4882a593Smuzhiyun 		rc = bnx2x_func_start(bp);
2769*4882a593Smuzhiyun 		if (rc) {
2770*4882a593Smuzhiyun 			BNX2X_ERR("Function start failed!\n");
2771*4882a593Smuzhiyun 			bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2772*4882a593Smuzhiyun 
2773*4882a593Smuzhiyun 			LOAD_ERROR_EXIT(bp, load_error3);
2774*4882a593Smuzhiyun 		}
2775*4882a593Smuzhiyun 
2776*4882a593Smuzhiyun 		/* Send LOAD_DONE command to MCP */
2777*4882a593Smuzhiyun 		if (!BP_NOMCP(bp)) {
2778*4882a593Smuzhiyun 			load_code = bnx2x_fw_command(bp,
2779*4882a593Smuzhiyun 						     DRV_MSG_CODE_LOAD_DONE, 0);
2780*4882a593Smuzhiyun 			if (!load_code) {
2781*4882a593Smuzhiyun 				BNX2X_ERR("MCP response failure, aborting\n");
2782*4882a593Smuzhiyun 				rc = -EBUSY;
2783*4882a593Smuzhiyun 				LOAD_ERROR_EXIT(bp, load_error3);
2784*4882a593Smuzhiyun 			}
2785*4882a593Smuzhiyun 		}
2786*4882a593Smuzhiyun 
2787*4882a593Smuzhiyun 		/* initialize FW coalescing state machines in RAM */
2788*4882a593Smuzhiyun 		bnx2x_update_coalesce(bp);
2789*4882a593Smuzhiyun 	}
2790*4882a593Smuzhiyun 
2791*4882a593Smuzhiyun 	/* setup the leading queue */
2792*4882a593Smuzhiyun 	rc = bnx2x_setup_leading(bp);
2793*4882a593Smuzhiyun 	if (rc) {
2794*4882a593Smuzhiyun 		BNX2X_ERR("Setup leading failed!\n");
2795*4882a593Smuzhiyun 		LOAD_ERROR_EXIT(bp, load_error3);
2796*4882a593Smuzhiyun 	}
2797*4882a593Smuzhiyun 
2798*4882a593Smuzhiyun 	/* set up the rest of the queues */
2799*4882a593Smuzhiyun 	for_each_nondefault_eth_queue(bp, i) {
2800*4882a593Smuzhiyun 		if (IS_PF(bp))
2801*4882a593Smuzhiyun 			rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2802*4882a593Smuzhiyun 		else /* VF */
2803*4882a593Smuzhiyun 			rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2804*4882a593Smuzhiyun 		if (rc) {
2805*4882a593Smuzhiyun 			BNX2X_ERR("Queue %d setup failed\n", i);
2806*4882a593Smuzhiyun 			LOAD_ERROR_EXIT(bp, load_error3);
2807*4882a593Smuzhiyun 		}
2808*4882a593Smuzhiyun 	}
2809*4882a593Smuzhiyun 
2810*4882a593Smuzhiyun 	/* setup rss */
2811*4882a593Smuzhiyun 	rc = bnx2x_init_rss(bp);
2812*4882a593Smuzhiyun 	if (rc) {
2813*4882a593Smuzhiyun 		BNX2X_ERR("PF RSS init failed\n");
2814*4882a593Smuzhiyun 		LOAD_ERROR_EXIT(bp, load_error3);
2815*4882a593Smuzhiyun 	}
2816*4882a593Smuzhiyun 
2817*4882a593Smuzhiyun 	/* Now when Clients are configured we are ready to work */
2818*4882a593Smuzhiyun 	bp->state = BNX2X_STATE_OPEN;
2819*4882a593Smuzhiyun 
2820*4882a593Smuzhiyun 	/* Configure a ucast MAC */
2821*4882a593Smuzhiyun 	if (IS_PF(bp))
2822*4882a593Smuzhiyun 		rc = bnx2x_set_eth_mac(bp, true);
2823*4882a593Smuzhiyun 	else /* vf */
2824*4882a593Smuzhiyun 		rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2825*4882a593Smuzhiyun 					   true);
2826*4882a593Smuzhiyun 	if (rc) {
2827*4882a593Smuzhiyun 		BNX2X_ERR("Setting Ethernet MAC failed\n");
2828*4882a593Smuzhiyun 		LOAD_ERROR_EXIT(bp, load_error3);
2829*4882a593Smuzhiyun 	}
2830*4882a593Smuzhiyun 
2831*4882a593Smuzhiyun 	if (IS_PF(bp) && bp->pending_max) {
2832*4882a593Smuzhiyun 		bnx2x_update_max_mf_config(bp, bp->pending_max);
2833*4882a593Smuzhiyun 		bp->pending_max = 0;
2834*4882a593Smuzhiyun 	}
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun 	bp->force_link_down = false;
2837*4882a593Smuzhiyun 	if (bp->port.pmf) {
2838*4882a593Smuzhiyun 		rc = bnx2x_initial_phy_init(bp, load_mode);
2839*4882a593Smuzhiyun 		if (rc)
2840*4882a593Smuzhiyun 			LOAD_ERROR_EXIT(bp, load_error3);
2841*4882a593Smuzhiyun 	}
2842*4882a593Smuzhiyun 	bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2843*4882a593Smuzhiyun 
2844*4882a593Smuzhiyun 	/* Start fast path */
2845*4882a593Smuzhiyun 
2846*4882a593Smuzhiyun 	/* Re-configure vlan filters */
2847*4882a593Smuzhiyun 	rc = bnx2x_vlan_reconfigure_vid(bp);
2848*4882a593Smuzhiyun 	if (rc)
2849*4882a593Smuzhiyun 		LOAD_ERROR_EXIT(bp, load_error3);
2850*4882a593Smuzhiyun 
2851*4882a593Smuzhiyun 	/* Initialize Rx filter. */
2852*4882a593Smuzhiyun 	bnx2x_set_rx_mode_inner(bp);
2853*4882a593Smuzhiyun 
2854*4882a593Smuzhiyun 	if (bp->flags & PTP_SUPPORTED) {
2855*4882a593Smuzhiyun 		bnx2x_register_phc(bp);
2856*4882a593Smuzhiyun 		bnx2x_init_ptp(bp);
2857*4882a593Smuzhiyun 		bnx2x_configure_ptp_filters(bp);
2858*4882a593Smuzhiyun 	}
2859*4882a593Smuzhiyun 	/* Start Tx */
2860*4882a593Smuzhiyun 	switch (load_mode) {
2861*4882a593Smuzhiyun 	case LOAD_NORMAL:
2862*4882a593Smuzhiyun 		/* Tx queue should be only re-enabled */
2863*4882a593Smuzhiyun 		netif_tx_wake_all_queues(bp->dev);
2864*4882a593Smuzhiyun 		break;
2865*4882a593Smuzhiyun 
2866*4882a593Smuzhiyun 	case LOAD_OPEN:
2867*4882a593Smuzhiyun 		netif_tx_start_all_queues(bp->dev);
2868*4882a593Smuzhiyun 		smp_mb__after_atomic();
2869*4882a593Smuzhiyun 		break;
2870*4882a593Smuzhiyun 
2871*4882a593Smuzhiyun 	case LOAD_DIAG:
2872*4882a593Smuzhiyun 	case LOAD_LOOPBACK_EXT:
2873*4882a593Smuzhiyun 		bp->state = BNX2X_STATE_DIAG;
2874*4882a593Smuzhiyun 		break;
2875*4882a593Smuzhiyun 
2876*4882a593Smuzhiyun 	default:
2877*4882a593Smuzhiyun 		break;
2878*4882a593Smuzhiyun 	}
2879*4882a593Smuzhiyun 
2880*4882a593Smuzhiyun 	if (bp->port.pmf)
2881*4882a593Smuzhiyun 		bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2882*4882a593Smuzhiyun 	else
2883*4882a593Smuzhiyun 		bnx2x__link_status_update(bp);
2884*4882a593Smuzhiyun 
2885*4882a593Smuzhiyun 	/* start the timer */
2886*4882a593Smuzhiyun 	mod_timer(&bp->timer, jiffies + bp->current_interval);
2887*4882a593Smuzhiyun 
2888*4882a593Smuzhiyun 	if (CNIC_ENABLED(bp))
2889*4882a593Smuzhiyun 		bnx2x_load_cnic(bp);
2890*4882a593Smuzhiyun 
2891*4882a593Smuzhiyun 	if (IS_PF(bp))
2892*4882a593Smuzhiyun 		bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2893*4882a593Smuzhiyun 
2894*4882a593Smuzhiyun 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2895*4882a593Smuzhiyun 		/* mark that the driver is loaded in shmem2 */
2896*4882a593Smuzhiyun 		u32 val;
2897*4882a593Smuzhiyun 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2898*4882a593Smuzhiyun 		val &= ~DRV_FLAGS_MTU_MASK;
2899*4882a593Smuzhiyun 		val |= (bp->dev->mtu << DRV_FLAGS_MTU_SHIFT);
2900*4882a593Smuzhiyun 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2901*4882a593Smuzhiyun 			  val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2902*4882a593Smuzhiyun 			  DRV_FLAGS_CAPABILITIES_LOADED_L2);
2903*4882a593Smuzhiyun 	}
2904*4882a593Smuzhiyun 
2905*4882a593Smuzhiyun 	/* Wait for all pending SP commands to complete */
2906*4882a593Smuzhiyun 	if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2907*4882a593Smuzhiyun 		BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2908*4882a593Smuzhiyun 		bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2909*4882a593Smuzhiyun 		return -EBUSY;
2910*4882a593Smuzhiyun 	}
2911*4882a593Smuzhiyun 
2912*4882a593Smuzhiyun 	/* Update driver data for On-Chip MFW dump. */
2913*4882a593Smuzhiyun 	if (IS_PF(bp))
2914*4882a593Smuzhiyun 		bnx2x_update_mfw_dump(bp);
2915*4882a593Smuzhiyun 
2916*4882a593Smuzhiyun 	/* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2917*4882a593Smuzhiyun 	if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2918*4882a593Smuzhiyun 		bnx2x_dcbx_init(bp, false);
2919*4882a593Smuzhiyun 
2920*4882a593Smuzhiyun 	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2921*4882a593Smuzhiyun 		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_ACTIVE);
2922*4882a593Smuzhiyun 
2923*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP, "Ending successfully NIC load\n");
2924*4882a593Smuzhiyun 
2925*4882a593Smuzhiyun 	return 0;
2926*4882a593Smuzhiyun 
2927*4882a593Smuzhiyun #ifndef BNX2X_STOP_ON_ERROR
2928*4882a593Smuzhiyun load_error3:
2929*4882a593Smuzhiyun 	if (IS_PF(bp)) {
2930*4882a593Smuzhiyun 		bnx2x_int_disable_sync(bp, 1);
2931*4882a593Smuzhiyun 
2932*4882a593Smuzhiyun 		/* Clean queueable objects */
2933*4882a593Smuzhiyun 		bnx2x_squeeze_objects(bp);
2934*4882a593Smuzhiyun 	}
2935*4882a593Smuzhiyun 
2936*4882a593Smuzhiyun 	/* Free SKBs, SGEs, TPA pool and driver internals */
2937*4882a593Smuzhiyun 	bnx2x_free_skbs(bp);
2938*4882a593Smuzhiyun 	for_each_rx_queue(bp, i)
2939*4882a593Smuzhiyun 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2940*4882a593Smuzhiyun 
2941*4882a593Smuzhiyun 	/* Release IRQs */
2942*4882a593Smuzhiyun 	bnx2x_free_irq(bp);
2943*4882a593Smuzhiyun load_error2:
2944*4882a593Smuzhiyun 	if (IS_PF(bp) && !BP_NOMCP(bp)) {
2945*4882a593Smuzhiyun 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2946*4882a593Smuzhiyun 		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2947*4882a593Smuzhiyun 	}
2948*4882a593Smuzhiyun 
2949*4882a593Smuzhiyun 	bp->port.pmf = 0;
2950*4882a593Smuzhiyun load_error1:
2951*4882a593Smuzhiyun 	bnx2x_napi_disable(bp);
2952*4882a593Smuzhiyun 	bnx2x_del_all_napi(bp);
2953*4882a593Smuzhiyun 
2954*4882a593Smuzhiyun 	/* clear pf_load status, as it was already set */
2955*4882a593Smuzhiyun 	if (IS_PF(bp))
2956*4882a593Smuzhiyun 		bnx2x_clear_pf_load(bp);
2957*4882a593Smuzhiyun load_error0:
2958*4882a593Smuzhiyun 	bnx2x_free_fw_stats_mem(bp);
2959*4882a593Smuzhiyun 	bnx2x_free_fp_mem(bp);
2960*4882a593Smuzhiyun 	bnx2x_free_mem(bp);
2961*4882a593Smuzhiyun 
2962*4882a593Smuzhiyun 	return rc;
2963*4882a593Smuzhiyun #endif /* ! BNX2X_STOP_ON_ERROR */
2964*4882a593Smuzhiyun }
2965*4882a593Smuzhiyun 
bnx2x_drain_tx_queues(struct bnx2x * bp)2966*4882a593Smuzhiyun int bnx2x_drain_tx_queues(struct bnx2x *bp)
2967*4882a593Smuzhiyun {
2968*4882a593Smuzhiyun 	u8 rc = 0, cos, i;
2969*4882a593Smuzhiyun 
2970*4882a593Smuzhiyun 	/* Wait until tx fastpath tasks complete */
2971*4882a593Smuzhiyun 	for_each_tx_queue(bp, i) {
2972*4882a593Smuzhiyun 		struct bnx2x_fastpath *fp = &bp->fp[i];
2973*4882a593Smuzhiyun 
2974*4882a593Smuzhiyun 		for_each_cos_in_tx_queue(fp, cos)
2975*4882a593Smuzhiyun 			rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2976*4882a593Smuzhiyun 		if (rc)
2977*4882a593Smuzhiyun 			return rc;
2978*4882a593Smuzhiyun 	}
2979*4882a593Smuzhiyun 	return 0;
2980*4882a593Smuzhiyun }
2981*4882a593Smuzhiyun 
2982*4882a593Smuzhiyun /* must be called with rtnl_lock */
bnx2x_nic_unload(struct bnx2x * bp,int unload_mode,bool keep_link)2983*4882a593Smuzhiyun int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2984*4882a593Smuzhiyun {
2985*4882a593Smuzhiyun 	int i;
2986*4882a593Smuzhiyun 	bool global = false;
2987*4882a593Smuzhiyun 
2988*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP, "Starting NIC unload\n");
2989*4882a593Smuzhiyun 
2990*4882a593Smuzhiyun 	if (!IS_MF_SD_STORAGE_PERSONALITY_ONLY(bp))
2991*4882a593Smuzhiyun 		bnx2x_set_os_driver_state(bp, OS_DRIVER_STATE_DISABLED);
2992*4882a593Smuzhiyun 
2993*4882a593Smuzhiyun 	/* mark that the driver is unloaded in shmem2 */
2994*4882a593Smuzhiyun 	if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2995*4882a593Smuzhiyun 		u32 val;
2996*4882a593Smuzhiyun 		val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2997*4882a593Smuzhiyun 		SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2998*4882a593Smuzhiyun 			  val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2999*4882a593Smuzhiyun 	}
3000*4882a593Smuzhiyun 
3001*4882a593Smuzhiyun 	if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
3002*4882a593Smuzhiyun 	    (bp->state == BNX2X_STATE_CLOSED ||
3003*4882a593Smuzhiyun 	     bp->state == BNX2X_STATE_ERROR)) {
3004*4882a593Smuzhiyun 		/* We can get here if the driver has been unloaded
3005*4882a593Smuzhiyun 		 * during parity error recovery and is either waiting for a
3006*4882a593Smuzhiyun 		 * leader to complete or for other functions to unload and
3007*4882a593Smuzhiyun 		 * then ifdown has been issued. In this case we want to
3008*4882a593Smuzhiyun 		 * unload and let other functions to complete a recovery
3009*4882a593Smuzhiyun 		 * process.
3010*4882a593Smuzhiyun 		 */
3011*4882a593Smuzhiyun 		bp->recovery_state = BNX2X_RECOVERY_DONE;
3012*4882a593Smuzhiyun 		bp->is_leader = 0;
3013*4882a593Smuzhiyun 		bnx2x_release_leader_lock(bp);
3014*4882a593Smuzhiyun 		smp_mb();
3015*4882a593Smuzhiyun 
3016*4882a593Smuzhiyun 		DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
3017*4882a593Smuzhiyun 		BNX2X_ERR("Can't unload in closed or error state\n");
3018*4882a593Smuzhiyun 		return -EINVAL;
3019*4882a593Smuzhiyun 	}
3020*4882a593Smuzhiyun 
3021*4882a593Smuzhiyun 	/* Nothing to do during unload if previous bnx2x_nic_load()
3022*4882a593Smuzhiyun 	 * has not completed successfully - all resources are released.
3023*4882a593Smuzhiyun 	 *
3024*4882a593Smuzhiyun 	 * We can get here only after an unsuccessful ndo_* callback, during which
3025*4882a593Smuzhiyun 	 * dev->IFF_UP flag is still on.
3026*4882a593Smuzhiyun 	 */
3027*4882a593Smuzhiyun 	if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
3028*4882a593Smuzhiyun 		return 0;
3029*4882a593Smuzhiyun 
3030*4882a593Smuzhiyun 	/* It's important to set the bp->state to a value different from
3031*4882a593Smuzhiyun 	 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
3032*4882a593Smuzhiyun 	 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
3033*4882a593Smuzhiyun 	 */
3034*4882a593Smuzhiyun 	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
3035*4882a593Smuzhiyun 	smp_mb();
3036*4882a593Smuzhiyun 
3037*4882a593Smuzhiyun 	/* indicate to VFs that the PF is going down */
3038*4882a593Smuzhiyun 	bnx2x_iov_channel_down(bp);
3039*4882a593Smuzhiyun 
3040*4882a593Smuzhiyun 	if (CNIC_LOADED(bp))
3041*4882a593Smuzhiyun 		bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
3042*4882a593Smuzhiyun 
3043*4882a593Smuzhiyun 	/* Stop Tx */
3044*4882a593Smuzhiyun 	bnx2x_tx_disable(bp);
3045*4882a593Smuzhiyun 	netdev_reset_tc(bp->dev);
3046*4882a593Smuzhiyun 
3047*4882a593Smuzhiyun 	bp->rx_mode = BNX2X_RX_MODE_NONE;
3048*4882a593Smuzhiyun 
3049*4882a593Smuzhiyun 	del_timer_sync(&bp->timer);
3050*4882a593Smuzhiyun 
3051*4882a593Smuzhiyun 	if (IS_PF(bp) && !BP_NOMCP(bp)) {
3052*4882a593Smuzhiyun 		/* Set ALWAYS_ALIVE bit in shmem */
3053*4882a593Smuzhiyun 		bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
3054*4882a593Smuzhiyun 		bnx2x_drv_pulse(bp);
3055*4882a593Smuzhiyun 		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
3056*4882a593Smuzhiyun 		bnx2x_save_statistics(bp);
3057*4882a593Smuzhiyun 	}
3058*4882a593Smuzhiyun 
3059*4882a593Smuzhiyun 	/* Wait until consumers catch up with producers in all queues.
3060*4882a593Smuzhiyun 	 * If we're recovering, the FW can't write to the host, so there is no
3061*4882a593Smuzhiyun 	 * reason to wait for the queues to complete all Tx.
3062*4882a593Smuzhiyun 	 */
3063*4882a593Smuzhiyun 	if (unload_mode != UNLOAD_RECOVERY)
3064*4882a593Smuzhiyun 		bnx2x_drain_tx_queues(bp);
3065*4882a593Smuzhiyun 
3066*4882a593Smuzhiyun 	/* If VF, indicate to the PF that this function is going down (the PF
3067*4882a593Smuzhiyun 	 * will delete SP elements and clear initializations)
3068*4882a593Smuzhiyun 	 */
3069*4882a593Smuzhiyun 	if (IS_VF(bp)) {
3070*4882a593Smuzhiyun 		bnx2x_clear_vlan_info(bp);
3071*4882a593Smuzhiyun 		bnx2x_vfpf_close_vf(bp);
3072*4882a593Smuzhiyun 	} else if (unload_mode != UNLOAD_RECOVERY) {
3073*4882a593Smuzhiyun 		/* if this is a normal/close unload, we need to clean up the chip */
3074*4882a593Smuzhiyun 		bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3075*4882a593Smuzhiyun 	} else {
3076*4882a593Smuzhiyun 		/* Send the UNLOAD_REQUEST to the MCP */
3077*4882a593Smuzhiyun 		bnx2x_send_unload_req(bp, unload_mode);
3078*4882a593Smuzhiyun 
3079*4882a593Smuzhiyun 		/* Prevent transactions to the host from the functions on the
3080*4882a593Smuzhiyun 		 * engine that doesn't reset global blocks in case of a global
3081*4882a593Smuzhiyun 		 * attention, once global blocks are reset and gates are opened
3082*4882a593Smuzhiyun 		 * (the engine whose leader will perform the recovery
3083*4882a593Smuzhiyun 		 * last).
3084*4882a593Smuzhiyun 		 */
3085*4882a593Smuzhiyun 		if (!CHIP_IS_E1x(bp))
3086*4882a593Smuzhiyun 			bnx2x_pf_disable(bp);
3087*4882a593Smuzhiyun 
3088*4882a593Smuzhiyun 		/* Disable HW interrupts, NAPI */
3089*4882a593Smuzhiyun 		bnx2x_netif_stop(bp, 1);
3090*4882a593Smuzhiyun 		/* Delete all NAPI objects */
3091*4882a593Smuzhiyun 		bnx2x_del_all_napi(bp);
3092*4882a593Smuzhiyun 		if (CNIC_LOADED(bp))
3093*4882a593Smuzhiyun 			bnx2x_del_all_napi_cnic(bp);
3094*4882a593Smuzhiyun 		/* Release IRQs */
3095*4882a593Smuzhiyun 		bnx2x_free_irq(bp);
3096*4882a593Smuzhiyun 
3097*4882a593Smuzhiyun 		/* Report UNLOAD_DONE to MCP */
3098*4882a593Smuzhiyun 		bnx2x_send_unload_done(bp, false);
3099*4882a593Smuzhiyun 	}
3100*4882a593Smuzhiyun 
3101*4882a593Smuzhiyun 	/*
3102*4882a593Smuzhiyun 	 * At this stage no more interrupts will arrive so we may safely clean
3103*4882a593Smuzhiyun 	 * the queueable objects here in case they failed to get cleaned so far.
3104*4882a593Smuzhiyun 	 */
3105*4882a593Smuzhiyun 	if (IS_PF(bp))
3106*4882a593Smuzhiyun 		bnx2x_squeeze_objects(bp);
3107*4882a593Smuzhiyun 
3108*4882a593Smuzhiyun 	/* There should be no more pending SP commands at this stage */
3109*4882a593Smuzhiyun 	bp->sp_state = 0;
3110*4882a593Smuzhiyun 
3111*4882a593Smuzhiyun 	bp->port.pmf = 0;
3112*4882a593Smuzhiyun 
3113*4882a593Smuzhiyun 	/* clear pending work in rtnl task */
3114*4882a593Smuzhiyun 	bp->sp_rtnl_state = 0;
3115*4882a593Smuzhiyun 	smp_mb();
3116*4882a593Smuzhiyun 
3117*4882a593Smuzhiyun 	/* Free SKBs, SGEs, TPA pool and driver internals */
3118*4882a593Smuzhiyun 	bnx2x_free_skbs(bp);
3119*4882a593Smuzhiyun 	if (CNIC_LOADED(bp))
3120*4882a593Smuzhiyun 		bnx2x_free_skbs_cnic(bp);
3121*4882a593Smuzhiyun 	for_each_rx_queue(bp, i)
3122*4882a593Smuzhiyun 		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3123*4882a593Smuzhiyun 
3124*4882a593Smuzhiyun 	bnx2x_free_fp_mem(bp);
3125*4882a593Smuzhiyun 	if (CNIC_LOADED(bp))
3126*4882a593Smuzhiyun 		bnx2x_free_fp_mem_cnic(bp);
3127*4882a593Smuzhiyun 
3128*4882a593Smuzhiyun 	if (IS_PF(bp)) {
3129*4882a593Smuzhiyun 		if (CNIC_LOADED(bp))
3130*4882a593Smuzhiyun 			bnx2x_free_mem_cnic(bp);
3131*4882a593Smuzhiyun 	}
3132*4882a593Smuzhiyun 	bnx2x_free_mem(bp);
3133*4882a593Smuzhiyun 
3134*4882a593Smuzhiyun 	bp->state = BNX2X_STATE_CLOSED;
3135*4882a593Smuzhiyun 	bp->cnic_loaded = false;
3136*4882a593Smuzhiyun 
3137*4882a593Smuzhiyun 	/* Clear driver version indication in shmem */
3138*4882a593Smuzhiyun 	if (IS_PF(bp) && !BP_NOMCP(bp))
3139*4882a593Smuzhiyun 		bnx2x_update_mng_version(bp);
3140*4882a593Smuzhiyun 
3141*4882a593Smuzhiyun 	/* Check if there are pending parity attentions. If there are - set
3142*4882a593Smuzhiyun 	 * RECOVERY_IN_PROGRESS.
3143*4882a593Smuzhiyun 	 */
3144*4882a593Smuzhiyun 	if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3145*4882a593Smuzhiyun 		bnx2x_set_reset_in_progress(bp);
3146*4882a593Smuzhiyun 
3147*4882a593Smuzhiyun 		/* Set RESET_IS_GLOBAL if needed */
3148*4882a593Smuzhiyun 		if (global)
3149*4882a593Smuzhiyun 			bnx2x_set_reset_global(bp);
3150*4882a593Smuzhiyun 	}
3151*4882a593Smuzhiyun 
3152*4882a593Smuzhiyun 	/* The last driver must disable the "close the gate" mode if there is no
3153*4882a593Smuzhiyun 	 * parity attention or "process kill" pending.
3154*4882a593Smuzhiyun 	 */
3155*4882a593Smuzhiyun 	if (IS_PF(bp) &&
3156*4882a593Smuzhiyun 	    !bnx2x_clear_pf_load(bp) &&
3157*4882a593Smuzhiyun 	    bnx2x_reset_is_done(bp, BP_PATH(bp)))
3158*4882a593Smuzhiyun 		bnx2x_disable_close_the_gate(bp);
3159*4882a593Smuzhiyun 
3160*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP, "Ending NIC unload\n");
3161*4882a593Smuzhiyun 
3162*4882a593Smuzhiyun 	return 0;
3163*4882a593Smuzhiyun }
3164*4882a593Smuzhiyun 
bnx2x_set_power_state(struct bnx2x * bp,pci_power_t state)3165*4882a593Smuzhiyun int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3166*4882a593Smuzhiyun {
3167*4882a593Smuzhiyun 	u16 pmcsr;
3168*4882a593Smuzhiyun 
3169*4882a593Smuzhiyun 	/* If there is no power capability, silently succeed */
3170*4882a593Smuzhiyun 	if (!bp->pdev->pm_cap) {
3171*4882a593Smuzhiyun 		BNX2X_DEV_INFO("No power capability. Breaking.\n");
3172*4882a593Smuzhiyun 		return 0;
3173*4882a593Smuzhiyun 	}
3174*4882a593Smuzhiyun 
3175*4882a593Smuzhiyun 	pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3176*4882a593Smuzhiyun 
3177*4882a593Smuzhiyun 	switch (state) {
3178*4882a593Smuzhiyun 	case PCI_D0:
3179*4882a593Smuzhiyun 		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3180*4882a593Smuzhiyun 				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3181*4882a593Smuzhiyun 				       PCI_PM_CTRL_PME_STATUS));
3182*4882a593Smuzhiyun 
3183*4882a593Smuzhiyun 		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3184*4882a593Smuzhiyun 			/* delay required during transition out of D3hot */
3185*4882a593Smuzhiyun 			msleep(20);
3186*4882a593Smuzhiyun 		break;
3187*4882a593Smuzhiyun 
3188*4882a593Smuzhiyun 	case PCI_D3hot:
3189*4882a593Smuzhiyun 		/* If there are other clients above, don't
3190*4882a593Smuzhiyun 		   shut down the power */
3191*4882a593Smuzhiyun 		if (atomic_read(&bp->pdev->enable_cnt) != 1)
3192*4882a593Smuzhiyun 			return 0;
3193*4882a593Smuzhiyun 		/* Don't shut down the power for emulation and FPGA */
3194*4882a593Smuzhiyun 		if (CHIP_REV_IS_SLOW(bp))
3195*4882a593Smuzhiyun 			return 0;
3196*4882a593Smuzhiyun 
3197*4882a593Smuzhiyun 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
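		/* the state field is cleared above and then set to 3, which is
		 * the D3hot encoding in the PCI_PM_CTRL register
		 */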
3198*4882a593Smuzhiyun 		pmcsr |= 3;
3199*4882a593Smuzhiyun 
3200*4882a593Smuzhiyun 		if (bp->wol)
3201*4882a593Smuzhiyun 			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3202*4882a593Smuzhiyun 
3203*4882a593Smuzhiyun 		pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3204*4882a593Smuzhiyun 				      pmcsr);
3205*4882a593Smuzhiyun 
3206*4882a593Smuzhiyun 		/* No more memory access after this point until
3207*4882a593Smuzhiyun 		* device is brought back to D0.
3208*4882a593Smuzhiyun 		*/
3209*4882a593Smuzhiyun 		break;
3210*4882a593Smuzhiyun 
3211*4882a593Smuzhiyun 	default:
3212*4882a593Smuzhiyun 		dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
3213*4882a593Smuzhiyun 		return -EINVAL;
3214*4882a593Smuzhiyun 	}
3215*4882a593Smuzhiyun 	return 0;
3216*4882a593Smuzhiyun }
3217*4882a593Smuzhiyun 
3218*4882a593Smuzhiyun /*
3219*4882a593Smuzhiyun  * net_device service functions
3220*4882a593Smuzhiyun  */
bnx2x_poll(struct napi_struct * napi,int budget)3221*4882a593Smuzhiyun static int bnx2x_poll(struct napi_struct *napi, int budget)
3222*4882a593Smuzhiyun {
3223*4882a593Smuzhiyun 	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3224*4882a593Smuzhiyun 						 napi);
3225*4882a593Smuzhiyun 	struct bnx2x *bp = fp->bp;
3226*4882a593Smuzhiyun 	int rx_work_done;
3227*4882a593Smuzhiyun 	u8 cos;
3228*4882a593Smuzhiyun 
3229*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
3230*4882a593Smuzhiyun 	if (unlikely(bp->panic)) {
3231*4882a593Smuzhiyun 		napi_complete(napi);
3232*4882a593Smuzhiyun 		return 0;
3233*4882a593Smuzhiyun 	}
3234*4882a593Smuzhiyun #endif
3235*4882a593Smuzhiyun 	for_each_cos_in_tx_queue(fp, cos)
3236*4882a593Smuzhiyun 		if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3237*4882a593Smuzhiyun 			bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3238*4882a593Smuzhiyun 
3239*4882a593Smuzhiyun 	rx_work_done = (bnx2x_has_rx_work(fp)) ? bnx2x_rx_int(fp, budget) : 0;
3240*4882a593Smuzhiyun 
3241*4882a593Smuzhiyun 	if (rx_work_done < budget) {
3242*4882a593Smuzhiyun 		/* No need to update SB for FCoE L2 ring as long as
3243*4882a593Smuzhiyun 		 * it's connected to the default SB and the SB
3244*4882a593Smuzhiyun 		 * has been updated when NAPI was scheduled.
3245*4882a593Smuzhiyun 		 */
3246*4882a593Smuzhiyun 		if (IS_FCOE_FP(fp)) {
3247*4882a593Smuzhiyun 			napi_complete_done(napi, rx_work_done);
3248*4882a593Smuzhiyun 		} else {
3249*4882a593Smuzhiyun 			bnx2x_update_fpsb_idx(fp);
3250*4882a593Smuzhiyun 			/* bnx2x_has_rx_work() reads the status block,
3251*4882a593Smuzhiyun 			 * thus we need to ensure that status block indices
3252*4882a593Smuzhiyun 			 * have been actually read (bnx2x_update_fpsb_idx)
3253*4882a593Smuzhiyun 			 * prior to this check (bnx2x_has_rx_work) so that
3254*4882a593Smuzhiyun 			 * we won't write the "newer" value of the status block
3255*4882a593Smuzhiyun 			 * to IGU (if there was a DMA right after
3256*4882a593Smuzhiyun 			 * bnx2x_has_rx_work and if there is no rmb, the memory
3257*4882a593Smuzhiyun 			 * reading (bnx2x_update_fpsb_idx) may be postponed
3258*4882a593Smuzhiyun 			 * to right before bnx2x_ack_sb). In this case there
3259*4882a593Smuzhiyun 			 * will never be another interrupt until there is
3260*4882a593Smuzhiyun 			 * another update of the status block, while there
3261*4882a593Smuzhiyun 			 * is still unhandled work.
3262*4882a593Smuzhiyun 			 */
3263*4882a593Smuzhiyun 			rmb();
3264*4882a593Smuzhiyun 
3265*4882a593Smuzhiyun 			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3266*4882a593Smuzhiyun 				if (napi_complete_done(napi, rx_work_done)) {
3267*4882a593Smuzhiyun 					/* Re-enable interrupts */
3268*4882a593Smuzhiyun 					DP(NETIF_MSG_RX_STATUS,
3269*4882a593Smuzhiyun 					   "Update index to %d\n", fp->fp_hc_idx);
3270*4882a593Smuzhiyun 					bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3271*4882a593Smuzhiyun 						     le16_to_cpu(fp->fp_hc_idx),
3272*4882a593Smuzhiyun 						     IGU_INT_ENABLE, 1);
3273*4882a593Smuzhiyun 				}
3274*4882a593Smuzhiyun 			} else {
3275*4882a593Smuzhiyun 				rx_work_done = budget;
3276*4882a593Smuzhiyun 			}
3277*4882a593Smuzhiyun 		}
3278*4882a593Smuzhiyun 	}
3279*4882a593Smuzhiyun 
3280*4882a593Smuzhiyun 	return rx_work_done;
3281*4882a593Smuzhiyun }
3282*4882a593Smuzhiyun 
3283*4882a593Smuzhiyun /* We split the first BD into a header BD and a data BD
3284*4882a593Smuzhiyun  * to ease the pain of our fellow microcode engineers;
3285*4882a593Smuzhiyun  * we use one mapping for both BDs
3286*4882a593Smuzhiyun  */
bnx2x_tx_split(struct bnx2x * bp,struct bnx2x_fp_txdata * txdata,struct sw_tx_bd * tx_buf,struct eth_tx_start_bd ** tx_bd,u16 hlen,u16 bd_prod)3287*4882a593Smuzhiyun static u16 bnx2x_tx_split(struct bnx2x *bp,
3288*4882a593Smuzhiyun 			  struct bnx2x_fp_txdata *txdata,
3289*4882a593Smuzhiyun 			  struct sw_tx_bd *tx_buf,
3290*4882a593Smuzhiyun 			  struct eth_tx_start_bd **tx_bd, u16 hlen,
3291*4882a593Smuzhiyun 			  u16 bd_prod)
3292*4882a593Smuzhiyun {
3293*4882a593Smuzhiyun 	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3294*4882a593Smuzhiyun 	struct eth_tx_bd *d_tx_bd;
3295*4882a593Smuzhiyun 	dma_addr_t mapping;
3296*4882a593Smuzhiyun 	int old_len = le16_to_cpu(h_tx_bd->nbytes);
3297*4882a593Smuzhiyun 
3298*4882a593Smuzhiyun 	/* first fix first BD */
3299*4882a593Smuzhiyun 	h_tx_bd->nbytes = cpu_to_le16(hlen);
3300*4882a593Smuzhiyun 
3301*4882a593Smuzhiyun 	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d (%x:%x)\n",
3302*4882a593Smuzhiyun 	   h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3303*4882a593Smuzhiyun 
3304*4882a593Smuzhiyun 	/* now get a new data BD
3305*4882a593Smuzhiyun 	 * (after the pbd) and fill it */
3306*4882a593Smuzhiyun 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3307*4882a593Smuzhiyun 	d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3308*4882a593Smuzhiyun 
3309*4882a593Smuzhiyun 	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3310*4882a593Smuzhiyun 			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3311*4882a593Smuzhiyun 
3312*4882a593Smuzhiyun 	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3313*4882a593Smuzhiyun 	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3314*4882a593Smuzhiyun 	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
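	/* e.g. a 1514-byte first BD split at hlen == 66 becomes a 66-byte
	 * header BD plus a 1448-byte data BD starting at mapping + 66
	 */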
3315*4882a593Smuzhiyun 
3316*4882a593Smuzhiyun 	/* this marks the BD as one that has no individual mapping */
3317*4882a593Smuzhiyun 	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3318*4882a593Smuzhiyun 
3319*4882a593Smuzhiyun 	DP(NETIF_MSG_TX_QUEUED,
3320*4882a593Smuzhiyun 	   "TSO split data size is %d (%x:%x)\n",
3321*4882a593Smuzhiyun 	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3322*4882a593Smuzhiyun 
3323*4882a593Smuzhiyun 	/* update tx_bd */
3324*4882a593Smuzhiyun 	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
3325*4882a593Smuzhiyun 
3326*4882a593Smuzhiyun 	return bd_prod;
3327*4882a593Smuzhiyun }
3328*4882a593Smuzhiyun 
3329*4882a593Smuzhiyun #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3330*4882a593Smuzhiyun #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
bnx2x_csum_fix(unsigned char * t_header,u16 csum,s8 fix)3331*4882a593Smuzhiyun static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3332*4882a593Smuzhiyun {
3333*4882a593Smuzhiyun 	__sum16 tsum = (__force __sum16) csum;
3334*4882a593Smuzhiyun 
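	/* A positive fix removes the contribution of the fix bytes that
	 * precede t_header from the checksum; a negative fix adds in the -fix
	 * bytes starting at t_header. The result is returned byte-swapped.
	 */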
3335*4882a593Smuzhiyun 	if (fix > 0)
3336*4882a593Smuzhiyun 		tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3337*4882a593Smuzhiyun 				  csum_partial(t_header - fix, fix, 0)));
3338*4882a593Smuzhiyun 
3339*4882a593Smuzhiyun 	else if (fix < 0)
3340*4882a593Smuzhiyun 		tsum = ~csum_fold(csum_add((__force __wsum) csum,
3341*4882a593Smuzhiyun 				  csum_partial(t_header, -fix, 0)));
3342*4882a593Smuzhiyun 
3343*4882a593Smuzhiyun 	return bswab16(tsum);
3344*4882a593Smuzhiyun }
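
/* In words, the fixup above: for a positive fix, the checksum of the
 * `fix` bytes immediately before t_header is folded out of the
 * stack-supplied value; for a negative fix, the checksum of the -fix
 * bytes starting at t_header is folded back in.  The caller passes
 * SKB_CS_OFF(skb), presumably the distance between where the stack
 * started its checksum and where the device expects it to start, and the
 * result is byte-swapped into the order the parse BD uses.
 */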
3345*4882a593Smuzhiyun 
3346*4882a593Smuzhiyun static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3347*4882a593Smuzhiyun {
3348*4882a593Smuzhiyun 	u32 rc;
3349*4882a593Smuzhiyun 	__u8 prot = 0;
3350*4882a593Smuzhiyun 	__be16 protocol;
3351*4882a593Smuzhiyun 
3352*4882a593Smuzhiyun 	if (skb->ip_summed != CHECKSUM_PARTIAL)
3353*4882a593Smuzhiyun 		return XMIT_PLAIN;
3354*4882a593Smuzhiyun 
3355*4882a593Smuzhiyun 	protocol = vlan_get_protocol(skb);
3356*4882a593Smuzhiyun 	if (protocol == htons(ETH_P_IPV6)) {
3357*4882a593Smuzhiyun 		rc = XMIT_CSUM_V6;
3358*4882a593Smuzhiyun 		prot = ipv6_hdr(skb)->nexthdr;
3359*4882a593Smuzhiyun 	} else {
3360*4882a593Smuzhiyun 		rc = XMIT_CSUM_V4;
3361*4882a593Smuzhiyun 		prot = ip_hdr(skb)->protocol;
3362*4882a593Smuzhiyun 	}
3363*4882a593Smuzhiyun 
3364*4882a593Smuzhiyun 	if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3365*4882a593Smuzhiyun 		if (inner_ip_hdr(skb)->version == 6) {
3366*4882a593Smuzhiyun 			rc |= XMIT_CSUM_ENC_V6;
3367*4882a593Smuzhiyun 			if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3368*4882a593Smuzhiyun 				rc |= XMIT_CSUM_TCP;
3369*4882a593Smuzhiyun 		} else {
3370*4882a593Smuzhiyun 			rc |= XMIT_CSUM_ENC_V4;
3371*4882a593Smuzhiyun 			if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3372*4882a593Smuzhiyun 				rc |= XMIT_CSUM_TCP;
3373*4882a593Smuzhiyun 		}
3374*4882a593Smuzhiyun 	}
3375*4882a593Smuzhiyun 	if (prot == IPPROTO_TCP)
3376*4882a593Smuzhiyun 		rc |= XMIT_CSUM_TCP;
3377*4882a593Smuzhiyun 
3378*4882a593Smuzhiyun 	if (skb_is_gso(skb)) {
3379*4882a593Smuzhiyun 		if (skb_is_gso_v6(skb)) {
3380*4882a593Smuzhiyun 			rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3381*4882a593Smuzhiyun 			if (rc & XMIT_CSUM_ENC)
3382*4882a593Smuzhiyun 				rc |= XMIT_GSO_ENC_V6;
3383*4882a593Smuzhiyun 		} else {
3384*4882a593Smuzhiyun 			rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3385*4882a593Smuzhiyun 			if (rc & XMIT_CSUM_ENC)
3386*4882a593Smuzhiyun 				rc |= XMIT_GSO_ENC_V4;
3387*4882a593Smuzhiyun 		}
3388*4882a593Smuzhiyun 	}
3389*4882a593Smuzhiyun 
3390*4882a593Smuzhiyun 	return rc;
3391*4882a593Smuzhiyun }
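
/* Example (illustrative): on a non-E1x device, a VXLAN-encapsulated
 * IPv4/TCP GSO skb would typically come out as
 *   XMIT_CSUM_V4 | XMIT_CSUM_ENC_V4 | XMIT_CSUM_TCP |
 *   XMIT_GSO_V4  | XMIT_GSO_ENC_V4
 * (the outer protocol is UDP, so the plain `prot == IPPROTO_TCP` test
 * does not fire; the inner TCP check does), while an skb without
 * CHECKSUM_PARTIAL is simply XMIT_PLAIN.
 */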
3392*4882a593Smuzhiyun 
3393*4882a593Smuzhiyun /* VXLAN: 4 = 1 (for linear data BD) + 3 (2 for PBD and last BD) */
3394*4882a593Smuzhiyun #define BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS         4
3395*4882a593Smuzhiyun 
3396*4882a593Smuzhiyun /* Regular: 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3397*4882a593Smuzhiyun #define BNX2X_NUM_TSO_WIN_SUB_BDS               3
3398*4882a593Smuzhiyun 
3399*4882a593Smuzhiyun #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3400*4882a593Smuzhiyun /* check if packet requires linearization (packet is too fragmented)
3401*4882a593Smuzhiyun    no need to check fragmentation if page size > 8K (there will be no
3402*4882a593Smuzhiyun    violation of FW restrictions) */
3403*4882a593Smuzhiyun static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3404*4882a593Smuzhiyun 			     u32 xmit_type)
3405*4882a593Smuzhiyun {
3406*4882a593Smuzhiyun 	int first_bd_sz = 0, num_tso_win_sub = BNX2X_NUM_TSO_WIN_SUB_BDS;
3407*4882a593Smuzhiyun 	int to_copy = 0, hlen = 0;
3408*4882a593Smuzhiyun 
3409*4882a593Smuzhiyun 	if (xmit_type & XMIT_GSO_ENC)
3410*4882a593Smuzhiyun 		num_tso_win_sub = BNX2X_NUM_VXLAN_TSO_WIN_SUB_BDS;
3411*4882a593Smuzhiyun 
3412*4882a593Smuzhiyun 	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - num_tso_win_sub)) {
3413*4882a593Smuzhiyun 		if (xmit_type & XMIT_GSO) {
3414*4882a593Smuzhiyun 			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
3415*4882a593Smuzhiyun 			int wnd_size = MAX_FETCH_BD - num_tso_win_sub;
3416*4882a593Smuzhiyun 			/* Number of windows to check */
3417*4882a593Smuzhiyun 			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3418*4882a593Smuzhiyun 			int wnd_idx = 0;
3419*4882a593Smuzhiyun 			int frag_idx = 0;
3420*4882a593Smuzhiyun 			u32 wnd_sum = 0;
3421*4882a593Smuzhiyun 
3422*4882a593Smuzhiyun 			/* Headers length */
3423*4882a593Smuzhiyun 			if (xmit_type & XMIT_GSO_ENC)
3424*4882a593Smuzhiyun 				hlen = (int)(skb_inner_transport_header(skb) -
3425*4882a593Smuzhiyun 					     skb->data) +
3426*4882a593Smuzhiyun 					     inner_tcp_hdrlen(skb);
3427*4882a593Smuzhiyun 			else
3428*4882a593Smuzhiyun 				hlen = (int)(skb_transport_header(skb) -
3429*4882a593Smuzhiyun 					     skb->data) + tcp_hdrlen(skb);
3430*4882a593Smuzhiyun 
3431*4882a593Smuzhiyun 			/* Amount of data (w/o headers) on linear part of SKB */
3432*4882a593Smuzhiyun 			first_bd_sz = skb_headlen(skb) - hlen;
3433*4882a593Smuzhiyun 
3434*4882a593Smuzhiyun 			wnd_sum  = first_bd_sz;
3435*4882a593Smuzhiyun 
3436*4882a593Smuzhiyun 			/* Calculate the first sum - it's special */
3437*4882a593Smuzhiyun 			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3438*4882a593Smuzhiyun 				wnd_sum +=
3439*4882a593Smuzhiyun 					skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
3440*4882a593Smuzhiyun 
3441*4882a593Smuzhiyun 			/* If there was data on linear skb data - check it */
3442*4882a593Smuzhiyun 			if (first_bd_sz > 0) {
3443*4882a593Smuzhiyun 				if (unlikely(wnd_sum < lso_mss)) {
3444*4882a593Smuzhiyun 					to_copy = 1;
3445*4882a593Smuzhiyun 					goto exit_lbl;
3446*4882a593Smuzhiyun 				}
3447*4882a593Smuzhiyun 
3448*4882a593Smuzhiyun 				wnd_sum -= first_bd_sz;
3449*4882a593Smuzhiyun 			}
3450*4882a593Smuzhiyun 
3451*4882a593Smuzhiyun 			/* Others are easier: run through the frag list and
3452*4882a593Smuzhiyun 			   check all windows */
3453*4882a593Smuzhiyun 			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3454*4882a593Smuzhiyun 				wnd_sum +=
3455*4882a593Smuzhiyun 			  skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3456*4882a593Smuzhiyun 
3457*4882a593Smuzhiyun 				if (unlikely(wnd_sum < lso_mss)) {
3458*4882a593Smuzhiyun 					to_copy = 1;
3459*4882a593Smuzhiyun 					break;
3460*4882a593Smuzhiyun 				}
3461*4882a593Smuzhiyun 				wnd_sum -=
3462*4882a593Smuzhiyun 					skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
3463*4882a593Smuzhiyun 			}
3464*4882a593Smuzhiyun 		} else {
3465*4882a593Smuzhiyun 			/* in the non-LSO case a too-fragmented packet should
3466*4882a593Smuzhiyun 			   always be linearized */
3467*4882a593Smuzhiyun 			to_copy = 1;
3468*4882a593Smuzhiyun 		}
3469*4882a593Smuzhiyun 	}
3470*4882a593Smuzhiyun 
3471*4882a593Smuzhiyun exit_lbl:
3472*4882a593Smuzhiyun 	if (unlikely(to_copy))
3473*4882a593Smuzhiyun 		DP(NETIF_MSG_TX_QUEUED,
3474*4882a593Smuzhiyun 		   "Linearization IS REQUIRED for %s packet. num_frags %d  hlen %d  first_bd_sz %d\n",
3475*4882a593Smuzhiyun 		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3476*4882a593Smuzhiyun 		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
3477*4882a593Smuzhiyun 
3478*4882a593Smuzhiyun 	return to_copy;
3479*4882a593Smuzhiyun }
3480*4882a593Smuzhiyun #endif
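
/* Sketch of the check above: the firmware can fetch only MAX_FETCH_BD
 * BDs per packet, num_tso_win_sub of which are already spoken for (the
 * linear data BD plus the parse and last BDs, per the defines above).
 * For LSO, the loop slides a window of wnd_size fragments across the skb
 * and asks for linearization as soon as any window holds less than one
 * gso_size of payload, the idea being that the firmware could not build
 * a full MSS-sized segment from the BDs it is able to fetch at once.
 */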
3481*4882a593Smuzhiyun 
3482*4882a593Smuzhiyun /**
3483*4882a593Smuzhiyun  * bnx2x_set_pbd_gso - update PBD in GSO case.
3484*4882a593Smuzhiyun  *
3485*4882a593Smuzhiyun  * @skb:	packet skb
3486*4882a593Smuzhiyun  * @pbd:	parse BD
3487*4882a593Smuzhiyun  * @xmit_type:	xmit flags
3488*4882a593Smuzhiyun  */
3489*4882a593Smuzhiyun static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3490*4882a593Smuzhiyun 			      struct eth_tx_parse_bd_e1x *pbd,
3491*4882a593Smuzhiyun 			      u32 xmit_type)
3492*4882a593Smuzhiyun {
3493*4882a593Smuzhiyun 	pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3494*4882a593Smuzhiyun 	pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3495*4882a593Smuzhiyun 	pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3496*4882a593Smuzhiyun 
3497*4882a593Smuzhiyun 	if (xmit_type & XMIT_GSO_V4) {
3498*4882a593Smuzhiyun 		pbd->ip_id = bswab16(ip_hdr(skb)->id);
3499*4882a593Smuzhiyun 		pbd->tcp_pseudo_csum =
3500*4882a593Smuzhiyun 			bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3501*4882a593Smuzhiyun 						   ip_hdr(skb)->daddr,
3502*4882a593Smuzhiyun 						   0, IPPROTO_TCP, 0));
3503*4882a593Smuzhiyun 	} else {
3504*4882a593Smuzhiyun 		pbd->tcp_pseudo_csum =
3505*4882a593Smuzhiyun 			bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3506*4882a593Smuzhiyun 						 &ipv6_hdr(skb)->daddr,
3507*4882a593Smuzhiyun 						 0, IPPROTO_TCP, 0));
3508*4882a593Smuzhiyun 	}
3509*4882a593Smuzhiyun 
3510*4882a593Smuzhiyun 	pbd->global_data |=
3511*4882a593Smuzhiyun 		cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3512*4882a593Smuzhiyun }
3513*4882a593Smuzhiyun 
3514*4882a593Smuzhiyun /**
3515*4882a593Smuzhiyun  * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3516*4882a593Smuzhiyun  *
3517*4882a593Smuzhiyun  * @bp:			driver handle
3518*4882a593Smuzhiyun  * @skb:		packet skb
3519*4882a593Smuzhiyun  * @parsing_data:	data to be updated
3520*4882a593Smuzhiyun  * @xmit_type:		xmit flags
3521*4882a593Smuzhiyun  *
3522*4882a593Smuzhiyun  * 57712/578xx related, when skb has encapsulation
3523*4882a593Smuzhiyun  */
3524*4882a593Smuzhiyun static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3525*4882a593Smuzhiyun 				 u32 *parsing_data, u32 xmit_type)
3526*4882a593Smuzhiyun {
3527*4882a593Smuzhiyun 	*parsing_data |=
3528*4882a593Smuzhiyun 		((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3529*4882a593Smuzhiyun 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3530*4882a593Smuzhiyun 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3531*4882a593Smuzhiyun 
3532*4882a593Smuzhiyun 	if (xmit_type & XMIT_CSUM_TCP) {
3533*4882a593Smuzhiyun 		*parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3534*4882a593Smuzhiyun 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3535*4882a593Smuzhiyun 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3536*4882a593Smuzhiyun 
3537*4882a593Smuzhiyun 		return skb_inner_transport_header(skb) +
3538*4882a593Smuzhiyun 			inner_tcp_hdrlen(skb) - skb->data;
3539*4882a593Smuzhiyun 	}
3540*4882a593Smuzhiyun 
3541*4882a593Smuzhiyun 	/* We support checksum offload for TCP and UDP only.
3542*4882a593Smuzhiyun 	 * No need to pass the UDP header length - it's a constant.
3543*4882a593Smuzhiyun 	 */
3544*4882a593Smuzhiyun 	return skb_inner_transport_header(skb) +
3545*4882a593Smuzhiyun 		sizeof(struct udphdr) - skb->data;
3546*4882a593Smuzhiyun }
3547*4882a593Smuzhiyun 
3548*4882a593Smuzhiyun /**
3549*4882a593Smuzhiyun  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3550*4882a593Smuzhiyun  *
3551*4882a593Smuzhiyun  * @bp:			driver handle
3552*4882a593Smuzhiyun  * @skb:		packet skb
3553*4882a593Smuzhiyun  * @parsing_data:	data to be updated
3554*4882a593Smuzhiyun  * @xmit_type:		xmit flags
3555*4882a593Smuzhiyun  *
3556*4882a593Smuzhiyun  * 57712/578xx related
3557*4882a593Smuzhiyun  */
3558*4882a593Smuzhiyun static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3559*4882a593Smuzhiyun 				u32 *parsing_data, u32 xmit_type)
3560*4882a593Smuzhiyun {
3561*4882a593Smuzhiyun 	*parsing_data |=
3562*4882a593Smuzhiyun 		((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3563*4882a593Smuzhiyun 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3564*4882a593Smuzhiyun 		ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3565*4882a593Smuzhiyun 
3566*4882a593Smuzhiyun 	if (xmit_type & XMIT_CSUM_TCP) {
3567*4882a593Smuzhiyun 		*parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3568*4882a593Smuzhiyun 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3569*4882a593Smuzhiyun 			ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3570*4882a593Smuzhiyun 
3571*4882a593Smuzhiyun 		return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3572*4882a593Smuzhiyun 	}
3573*4882a593Smuzhiyun 	/* We support checksum offload for TCP and UDP only.
3574*4882a593Smuzhiyun 	 * No need to pass the UDP header length - it's a constant.
3575*4882a593Smuzhiyun 	 */
3576*4882a593Smuzhiyun 	return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
3577*4882a593Smuzhiyun }
3578*4882a593Smuzhiyun 
3579*4882a593Smuzhiyun /* set FW indication according to inner or outer protocols if tunneled */
3580*4882a593Smuzhiyun static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3581*4882a593Smuzhiyun 			       struct eth_tx_start_bd *tx_start_bd,
3582*4882a593Smuzhiyun 			       u32 xmit_type)
3583*4882a593Smuzhiyun {
3584*4882a593Smuzhiyun 	tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3585*4882a593Smuzhiyun 
3586*4882a593Smuzhiyun 	if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3587*4882a593Smuzhiyun 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3588*4882a593Smuzhiyun 
3589*4882a593Smuzhiyun 	if (!(xmit_type & XMIT_CSUM_TCP))
3590*4882a593Smuzhiyun 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3591*4882a593Smuzhiyun }
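
/* The start BD carries only coarse checksum hints: L4_CSUM is always set
 * for offloaded skbs, IPV6 flags an IPv6 header (outer or inner for
 * tunnels), and IS_UDP distinguishes UDP from the default TCP case.  The
 * exact header offsets live in the parse BDs filled by the helpers above.
 */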
3592*4882a593Smuzhiyun 
3593*4882a593Smuzhiyun /**
3594*4882a593Smuzhiyun  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3595*4882a593Smuzhiyun  *
3596*4882a593Smuzhiyun  * @bp:		driver handle
3597*4882a593Smuzhiyun  * @skb:	packet skb
3598*4882a593Smuzhiyun  * @pbd:	parse BD to be updated
3599*4882a593Smuzhiyun  * @xmit_type:	xmit flags
3600*4882a593Smuzhiyun  */
3601*4882a593Smuzhiyun static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3602*4882a593Smuzhiyun 			     struct eth_tx_parse_bd_e1x *pbd,
3603*4882a593Smuzhiyun 			     u32 xmit_type)
3604*4882a593Smuzhiyun {
3605*4882a593Smuzhiyun 	u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3606*4882a593Smuzhiyun 
3607*4882a593Smuzhiyun 	/* for now NS flag is not used in Linux */
3608*4882a593Smuzhiyun 	pbd->global_data =
3609*4882a593Smuzhiyun 		cpu_to_le16(hlen |
3610*4882a593Smuzhiyun 			    ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3611*4882a593Smuzhiyun 			     ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3612*4882a593Smuzhiyun 
3613*4882a593Smuzhiyun 	pbd->ip_hlen_w = (skb_transport_header(skb) -
3614*4882a593Smuzhiyun 			skb_network_header(skb)) >> 1;
3615*4882a593Smuzhiyun 
3616*4882a593Smuzhiyun 	hlen += pbd->ip_hlen_w;
3617*4882a593Smuzhiyun 
3618*4882a593Smuzhiyun 	/* We support checksum offload for TCP and UDP only */
3619*4882a593Smuzhiyun 	if (xmit_type & XMIT_CSUM_TCP)
3620*4882a593Smuzhiyun 		hlen += tcp_hdrlen(skb) / 2;
3621*4882a593Smuzhiyun 	else
3622*4882a593Smuzhiyun 		hlen += sizeof(struct udphdr) / 2;
3623*4882a593Smuzhiyun 
3624*4882a593Smuzhiyun 	pbd->total_hlen_w = cpu_to_le16(hlen);
3625*4882a593Smuzhiyun 	hlen = hlen*2;
3626*4882a593Smuzhiyun 
3627*4882a593Smuzhiyun 	if (xmit_type & XMIT_CSUM_TCP) {
3628*4882a593Smuzhiyun 		pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3629*4882a593Smuzhiyun 
3630*4882a593Smuzhiyun 	} else {
3631*4882a593Smuzhiyun 		s8 fix = SKB_CS_OFF(skb); /* signed! */
3632*4882a593Smuzhiyun 
3633*4882a593Smuzhiyun 		DP(NETIF_MSG_TX_QUEUED,
3634*4882a593Smuzhiyun 		   "hlen %d  fix %d  csum before fix %x\n",
3635*4882a593Smuzhiyun 		   le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3636*4882a593Smuzhiyun 
3637*4882a593Smuzhiyun 		/* HW bug: fixup the CSUM */
3638*4882a593Smuzhiyun 		pbd->tcp_pseudo_csum =
3639*4882a593Smuzhiyun 			bnx2x_csum_fix(skb_transport_header(skb),
3640*4882a593Smuzhiyun 				       SKB_CS(skb), fix);
3641*4882a593Smuzhiyun 
3642*4882a593Smuzhiyun 		DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3643*4882a593Smuzhiyun 		   pbd->tcp_pseudo_csum);
3644*4882a593Smuzhiyun 	}
3645*4882a593Smuzhiyun 
3646*4882a593Smuzhiyun 	return hlen;
3647*4882a593Smuzhiyun }
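
/* Units note for the helper above: the E1x parse BD stores header
 * lengths in 16-bit words, hence the >> 1 on every offset and the
 * hlen * 2 handed back in bytes.  Illustrative case: an untagged
 * IPv4/TCP frame with 14-byte L2, 20-byte IP and 20-byte TCP headers
 * gives total_hlen_w = 27 words and a returned hlen of 54 bytes.
 */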
3648*4882a593Smuzhiyun 
3649*4882a593Smuzhiyun static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3650*4882a593Smuzhiyun 				      struct eth_tx_parse_bd_e2 *pbd_e2,
3651*4882a593Smuzhiyun 				      struct eth_tx_parse_2nd_bd *pbd2,
3652*4882a593Smuzhiyun 				      u16 *global_data,
3653*4882a593Smuzhiyun 				      u32 xmit_type)
3654*4882a593Smuzhiyun {
3655*4882a593Smuzhiyun 	u16 hlen_w = 0;
3656*4882a593Smuzhiyun 	u8 outerip_off, outerip_len = 0;
3657*4882a593Smuzhiyun 
3658*4882a593Smuzhiyun 	/* from outer IP to transport */
3659*4882a593Smuzhiyun 	hlen_w = (skb_inner_transport_header(skb) -
3660*4882a593Smuzhiyun 		  skb_network_header(skb)) >> 1;
3661*4882a593Smuzhiyun 
3662*4882a593Smuzhiyun 	/* transport len */
3663*4882a593Smuzhiyun 	hlen_w += inner_tcp_hdrlen(skb) >> 1;
3664*4882a593Smuzhiyun 
3665*4882a593Smuzhiyun 	pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3666*4882a593Smuzhiyun 
3667*4882a593Smuzhiyun 	/* outer IP header info */
3668*4882a593Smuzhiyun 	if (xmit_type & XMIT_CSUM_V4) {
3669*4882a593Smuzhiyun 		struct iphdr *iph = ip_hdr(skb);
3670*4882a593Smuzhiyun 		u32 csum = (__force u32)(~iph->check) -
3671*4882a593Smuzhiyun 			   (__force u32)iph->tot_len -
3672*4882a593Smuzhiyun 			   (__force u32)iph->frag_off;
3673*4882a593Smuzhiyun 
3674*4882a593Smuzhiyun 		outerip_len = iph->ihl << 1;
3675*4882a593Smuzhiyun 
3676*4882a593Smuzhiyun 		pbd2->fw_ip_csum_wo_len_flags_frag =
3677*4882a593Smuzhiyun 			bswab16(csum_fold((__force __wsum)csum));
3678*4882a593Smuzhiyun 	} else {
3679*4882a593Smuzhiyun 		pbd2->fw_ip_hdr_to_payload_w =
3680*4882a593Smuzhiyun 			hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3681*4882a593Smuzhiyun 		pbd_e2->data.tunnel_data.flags |=
3682*4882a593Smuzhiyun 			ETH_TUNNEL_DATA_IPV6_OUTER;
3683*4882a593Smuzhiyun 	}
3684*4882a593Smuzhiyun 
3685*4882a593Smuzhiyun 	pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3686*4882a593Smuzhiyun 
3687*4882a593Smuzhiyun 	pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3688*4882a593Smuzhiyun 
3689*4882a593Smuzhiyun 	/* inner IP header info */
3690*4882a593Smuzhiyun 	if (xmit_type & XMIT_CSUM_ENC_V4) {
3691*4882a593Smuzhiyun 		pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3692*4882a593Smuzhiyun 
3693*4882a593Smuzhiyun 		pbd_e2->data.tunnel_data.pseudo_csum =
3694*4882a593Smuzhiyun 			bswab16(~csum_tcpudp_magic(
3695*4882a593Smuzhiyun 					inner_ip_hdr(skb)->saddr,
3696*4882a593Smuzhiyun 					inner_ip_hdr(skb)->daddr,
3697*4882a593Smuzhiyun 					0, IPPROTO_TCP, 0));
3698*4882a593Smuzhiyun 	} else {
3699*4882a593Smuzhiyun 		pbd_e2->data.tunnel_data.pseudo_csum =
3700*4882a593Smuzhiyun 			bswab16(~csum_ipv6_magic(
3701*4882a593Smuzhiyun 					&inner_ipv6_hdr(skb)->saddr,
3702*4882a593Smuzhiyun 					&inner_ipv6_hdr(skb)->daddr,
3703*4882a593Smuzhiyun 					0, IPPROTO_TCP, 0));
3704*4882a593Smuzhiyun 	}
3705*4882a593Smuzhiyun 
3706*4882a593Smuzhiyun 	outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3707*4882a593Smuzhiyun 
3708*4882a593Smuzhiyun 	*global_data |=
3709*4882a593Smuzhiyun 		outerip_off |
3710*4882a593Smuzhiyun 		(outerip_len <<
3711*4882a593Smuzhiyun 			ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3712*4882a593Smuzhiyun 		((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3713*4882a593Smuzhiyun 			ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3714*4882a593Smuzhiyun 
3715*4882a593Smuzhiyun 	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3716*4882a593Smuzhiyun 		SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3717*4882a593Smuzhiyun 		pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3718*4882a593Smuzhiyun 	}
3719*4882a593Smuzhiyun }
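
/* The helper above fills the tunnel-specific parsing state: pbd2 carries
 * the outer-header information (IP checksum without length/frag, the
 * header-to-payload length in words, inner TCP sequence and flags) while
 * the e2 parse BD gets the inner pseudo checksum.  For a UDP-based
 * tunnel such as VXLAN, TUNNEL_UDP_EXIST and the outer UDP header offset
 * are set as well.
 */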
3720*4882a593Smuzhiyun 
3721*4882a593Smuzhiyun static inline void bnx2x_set_ipv6_ext_e2(struct sk_buff *skb, u32 *parsing_data,
3722*4882a593Smuzhiyun 					 u32 xmit_type)
3723*4882a593Smuzhiyun {
3724*4882a593Smuzhiyun 	struct ipv6hdr *ipv6;
3725*4882a593Smuzhiyun 
3726*4882a593Smuzhiyun 	if (!(xmit_type & (XMIT_GSO_ENC_V6 | XMIT_GSO_V6)))
3727*4882a593Smuzhiyun 		return;
3728*4882a593Smuzhiyun 
3729*4882a593Smuzhiyun 	if (xmit_type & XMIT_GSO_ENC_V6)
3730*4882a593Smuzhiyun 		ipv6 = inner_ipv6_hdr(skb);
3731*4882a593Smuzhiyun 	else /* XMIT_GSO_V6 */
3732*4882a593Smuzhiyun 		ipv6 = ipv6_hdr(skb);
3733*4882a593Smuzhiyun 
3734*4882a593Smuzhiyun 	if (ipv6->nexthdr == NEXTHDR_IPV6)
3735*4882a593Smuzhiyun 		*parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3736*4882a593Smuzhiyun }
3737*4882a593Smuzhiyun 
3738*4882a593Smuzhiyun /* called with netif_tx_lock
3739*4882a593Smuzhiyun  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3740*4882a593Smuzhiyun  * netif_wake_queue()
3741*4882a593Smuzhiyun  */
3742*4882a593Smuzhiyun netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3743*4882a593Smuzhiyun {
3744*4882a593Smuzhiyun 	struct bnx2x *bp = netdev_priv(dev);
3745*4882a593Smuzhiyun 
3746*4882a593Smuzhiyun 	struct netdev_queue *txq;
3747*4882a593Smuzhiyun 	struct bnx2x_fp_txdata *txdata;
3748*4882a593Smuzhiyun 	struct sw_tx_bd *tx_buf;
3749*4882a593Smuzhiyun 	struct eth_tx_start_bd *tx_start_bd, *first_bd;
3750*4882a593Smuzhiyun 	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3751*4882a593Smuzhiyun 	struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3752*4882a593Smuzhiyun 	struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3753*4882a593Smuzhiyun 	struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3754*4882a593Smuzhiyun 	u32 pbd_e2_parsing_data = 0;
3755*4882a593Smuzhiyun 	u16 pkt_prod, bd_prod;
3756*4882a593Smuzhiyun 	int nbd, txq_index;
3757*4882a593Smuzhiyun 	dma_addr_t mapping;
3758*4882a593Smuzhiyun 	u32 xmit_type = bnx2x_xmit_type(bp, skb);
3759*4882a593Smuzhiyun 	int i;
3760*4882a593Smuzhiyun 	u8 hlen = 0;
3761*4882a593Smuzhiyun 	__le16 pkt_size = 0;
3762*4882a593Smuzhiyun 	struct ethhdr *eth;
3763*4882a593Smuzhiyun 	u8 mac_type = UNICAST_ADDRESS;
3764*4882a593Smuzhiyun 
3765*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
3766*4882a593Smuzhiyun 	if (unlikely(bp->panic))
3767*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;
3768*4882a593Smuzhiyun #endif
3769*4882a593Smuzhiyun 
3770*4882a593Smuzhiyun 	txq_index = skb_get_queue_mapping(skb);
3771*4882a593Smuzhiyun 	txq = netdev_get_tx_queue(dev, txq_index);
3772*4882a593Smuzhiyun 
3773*4882a593Smuzhiyun 	BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3774*4882a593Smuzhiyun 
3775*4882a593Smuzhiyun 	txdata = &bp->bnx2x_txq[txq_index];
3776*4882a593Smuzhiyun 
3777*4882a593Smuzhiyun 	/* enable this debug print to view the transmission queue being used
3778*4882a593Smuzhiyun 	DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3779*4882a593Smuzhiyun 	   txq_index, fp_index, txdata_index); */
3780*4882a593Smuzhiyun 
3781*4882a593Smuzhiyun 	/* enable this debug print to view the transmission details
3782*4882a593Smuzhiyun 	DP(NETIF_MSG_TX_QUEUED,
3783*4882a593Smuzhiyun 	   "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3784*4882a593Smuzhiyun 	   txdata->cid, fp_index, txdata_index, txdata, fp); */
3785*4882a593Smuzhiyun 
3786*4882a593Smuzhiyun 	if (unlikely(bnx2x_tx_avail(bp, txdata) <
3787*4882a593Smuzhiyun 			skb_shinfo(skb)->nr_frags +
3788*4882a593Smuzhiyun 			BDS_PER_TX_PKT +
3789*4882a593Smuzhiyun 			NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3790*4882a593Smuzhiyun 		/* Handle special storage cases separately */
3791*4882a593Smuzhiyun 		if (txdata->tx_ring_size == 0) {
3792*4882a593Smuzhiyun 			struct bnx2x_eth_q_stats *q_stats =
3793*4882a593Smuzhiyun 				bnx2x_fp_qstats(bp, txdata->parent_fp);
3794*4882a593Smuzhiyun 			q_stats->driver_filtered_tx_pkt++;
3795*4882a593Smuzhiyun 			dev_kfree_skb(skb);
3796*4882a593Smuzhiyun 			return NETDEV_TX_OK;
3797*4882a593Smuzhiyun 		}
3798*4882a593Smuzhiyun 		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3799*4882a593Smuzhiyun 		netif_tx_stop_queue(txq);
3800*4882a593Smuzhiyun 		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3801*4882a593Smuzhiyun 
3802*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;
3803*4882a593Smuzhiyun 	}
3804*4882a593Smuzhiyun 
3805*4882a593Smuzhiyun 	DP(NETIF_MSG_TX_QUEUED,
3806*4882a593Smuzhiyun 	   "queue[%d]: SKB: summed %x  protocol %x protocol(%x,%x) gso type %x  xmit_type %x len %d\n",
3807*4882a593Smuzhiyun 	   txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3808*4882a593Smuzhiyun 	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3809*4882a593Smuzhiyun 	   skb->len);
3810*4882a593Smuzhiyun 
3811*4882a593Smuzhiyun 	eth = (struct ethhdr *)skb->data;
3812*4882a593Smuzhiyun 
3813*4882a593Smuzhiyun 	/* set flag according to packet type (UNICAST_ADDRESS is default)*/
3814*4882a593Smuzhiyun 	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3815*4882a593Smuzhiyun 		if (is_broadcast_ether_addr(eth->h_dest))
3816*4882a593Smuzhiyun 			mac_type = BROADCAST_ADDRESS;
3817*4882a593Smuzhiyun 		else
3818*4882a593Smuzhiyun 			mac_type = MULTICAST_ADDRESS;
3819*4882a593Smuzhiyun 	}
3820*4882a593Smuzhiyun 
3821*4882a593Smuzhiyun #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
3822*4882a593Smuzhiyun 	/* First, check if we need to linearize the skb (due to FW
3823*4882a593Smuzhiyun 	   restrictions). No need to check fragmentation if page size > 8K
3824*4882a593Smuzhiyun 	   (there will be no violation of FW restrictions) */
3825*4882a593Smuzhiyun 	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3826*4882a593Smuzhiyun 		/* Statistics of linearization */
3827*4882a593Smuzhiyun 		bp->lin_cnt++;
3828*4882a593Smuzhiyun 		if (skb_linearize(skb) != 0) {
3829*4882a593Smuzhiyun 			DP(NETIF_MSG_TX_QUEUED,
3830*4882a593Smuzhiyun 			   "SKB linearization failed - silently dropping this SKB\n");
3831*4882a593Smuzhiyun 			dev_kfree_skb_any(skb);
3832*4882a593Smuzhiyun 			return NETDEV_TX_OK;
3833*4882a593Smuzhiyun 		}
3834*4882a593Smuzhiyun 	}
3835*4882a593Smuzhiyun #endif
3836*4882a593Smuzhiyun 	/* Map skb linear data for DMA */
3837*4882a593Smuzhiyun 	mapping = dma_map_single(&bp->pdev->dev, skb->data,
3838*4882a593Smuzhiyun 				 skb_headlen(skb), DMA_TO_DEVICE);
3839*4882a593Smuzhiyun 	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3840*4882a593Smuzhiyun 		DP(NETIF_MSG_TX_QUEUED,
3841*4882a593Smuzhiyun 		   "SKB mapping failed - silently dropping this SKB\n");
3842*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
3843*4882a593Smuzhiyun 		return NETDEV_TX_OK;
3844*4882a593Smuzhiyun 	}
3845*4882a593Smuzhiyun 	/*
3846*4882a593Smuzhiyun 	Please read carefully. First we use one BD which we mark as start,
3847*4882a593Smuzhiyun 	then we have a parsing info BD (used for TSO or xsum),
3848*4882a593Smuzhiyun 	and only then we have the rest of the TSO BDs.
3849*4882a593Smuzhiyun 	(don't forget to mark the last one as last,
3850*4882a593Smuzhiyun 	and to unmap only AFTER you write to the BD ...)
3851*4882a593Smuzhiyun 	And above all, all pbd sizes are in words - NOT DWORDS!
3852*4882a593Smuzhiyun 	*/
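	/* Illustrative BD chain for a tunneled TSO frame on a non-E1x chip,
	 * as built by the code below: start BD for the headers, e2 parse BD,
	 * a second parse BD for the tunnel, possibly a split data BD from
	 * bnx2x_tx_split(), then one BD per page fragment; nbd counts them
	 * and ends up in first_bd->nbd.
	 */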
3853*4882a593Smuzhiyun 
3854*4882a593Smuzhiyun 	/* get current pkt produced now - advance it just before sending packet
3855*4882a593Smuzhiyun 	 * since mapping of pages may fail and cause packet to be dropped
3856*4882a593Smuzhiyun 	 */
3857*4882a593Smuzhiyun 	pkt_prod = txdata->tx_pkt_prod;
3858*4882a593Smuzhiyun 	bd_prod = TX_BD(txdata->tx_bd_prod);
3859*4882a593Smuzhiyun 
3860*4882a593Smuzhiyun 	/* get a tx_buf and first BD
3861*4882a593Smuzhiyun 	 * tx_start_bd may be changed during SPLIT,
3862*4882a593Smuzhiyun 	 * but first_bd will always stay first
3863*4882a593Smuzhiyun 	 */
3864*4882a593Smuzhiyun 	tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3865*4882a593Smuzhiyun 	tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3866*4882a593Smuzhiyun 	first_bd = tx_start_bd;
3867*4882a593Smuzhiyun 
3868*4882a593Smuzhiyun 	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3869*4882a593Smuzhiyun 
3870*4882a593Smuzhiyun 	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3871*4882a593Smuzhiyun 		if (!(bp->flags & TX_TIMESTAMPING_EN)) {
3872*4882a593Smuzhiyun 			bp->eth_stats.ptp_skip_tx_ts++;
3873*4882a593Smuzhiyun 			BNX2X_ERR("Tx timestamping was not enabled, this packet will not be timestamped\n");
3874*4882a593Smuzhiyun 		} else if (bp->ptp_tx_skb) {
3875*4882a593Smuzhiyun 			bp->eth_stats.ptp_skip_tx_ts++;
3876*4882a593Smuzhiyun 			netdev_err_once(bp->dev,
3877*4882a593Smuzhiyun 					"Device supports only a single outstanding packet to timestamp, this packet won't be timestamped\n");
3878*4882a593Smuzhiyun 		} else {
3879*4882a593Smuzhiyun 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3880*4882a593Smuzhiyun 			/* schedule check for Tx timestamp */
3881*4882a593Smuzhiyun 			bp->ptp_tx_skb = skb_get(skb);
3882*4882a593Smuzhiyun 			bp->ptp_tx_start = jiffies;
3883*4882a593Smuzhiyun 			schedule_work(&bp->ptp_task);
3884*4882a593Smuzhiyun 		}
3885*4882a593Smuzhiyun 	}
3886*4882a593Smuzhiyun 
3887*4882a593Smuzhiyun 	/* header nbd: indirectly zero other flags! */
3888*4882a593Smuzhiyun 	tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3889*4882a593Smuzhiyun 
3890*4882a593Smuzhiyun 	/* remember the first BD of the packet */
3891*4882a593Smuzhiyun 	tx_buf->first_bd = txdata->tx_bd_prod;
3892*4882a593Smuzhiyun 	tx_buf->skb = skb;
3893*4882a593Smuzhiyun 	tx_buf->flags = 0;
3894*4882a593Smuzhiyun 
3895*4882a593Smuzhiyun 	DP(NETIF_MSG_TX_QUEUED,
3896*4882a593Smuzhiyun 	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
3897*4882a593Smuzhiyun 	   pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3898*4882a593Smuzhiyun 
3899*4882a593Smuzhiyun 	if (skb_vlan_tag_present(skb)) {
3900*4882a593Smuzhiyun 		tx_start_bd->vlan_or_ethertype =
3901*4882a593Smuzhiyun 		    cpu_to_le16(skb_vlan_tag_get(skb));
3902*4882a593Smuzhiyun 		tx_start_bd->bd_flags.as_bitfield |=
3903*4882a593Smuzhiyun 		    (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3904*4882a593Smuzhiyun 	} else {
3905*4882a593Smuzhiyun 		/* when transmitting in a vf, start bd must hold the ethertype
3906*4882a593Smuzhiyun 		 * for fw to enforce it
3907*4882a593Smuzhiyun 		 */
3908*4882a593Smuzhiyun 		u16 vlan_tci = 0;
3909*4882a593Smuzhiyun #ifndef BNX2X_STOP_ON_ERROR
3910*4882a593Smuzhiyun 		if (IS_VF(bp)) {
3911*4882a593Smuzhiyun #endif
3912*4882a593Smuzhiyun 			/* Still need to consider inband vlan enforcement */
3913*4882a593Smuzhiyun 			if (__vlan_get_tag(skb, &vlan_tci)) {
3914*4882a593Smuzhiyun 				tx_start_bd->vlan_or_ethertype =
3915*4882a593Smuzhiyun 					cpu_to_le16(ntohs(eth->h_proto));
3916*4882a593Smuzhiyun 			} else {
3917*4882a593Smuzhiyun 				tx_start_bd->bd_flags.as_bitfield |=
3918*4882a593Smuzhiyun 					(X_ETH_INBAND_VLAN <<
3919*4882a593Smuzhiyun 					 ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3920*4882a593Smuzhiyun 				tx_start_bd->vlan_or_ethertype =
3921*4882a593Smuzhiyun 					cpu_to_le16(vlan_tci);
3922*4882a593Smuzhiyun 			}
3923*4882a593Smuzhiyun #ifndef BNX2X_STOP_ON_ERROR
3924*4882a593Smuzhiyun 		} else {
3925*4882a593Smuzhiyun 			/* used by FW for packet accounting */
3926*4882a593Smuzhiyun 			tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3927*4882a593Smuzhiyun 		}
3928*4882a593Smuzhiyun #endif
3929*4882a593Smuzhiyun 	}
3930*4882a593Smuzhiyun 
3931*4882a593Smuzhiyun 	nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3932*4882a593Smuzhiyun 
3933*4882a593Smuzhiyun 	/* turn on parsing and get a BD */
3934*4882a593Smuzhiyun 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3935*4882a593Smuzhiyun 
3936*4882a593Smuzhiyun 	if (xmit_type & XMIT_CSUM)
3937*4882a593Smuzhiyun 		bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3938*4882a593Smuzhiyun 
3939*4882a593Smuzhiyun 	if (!CHIP_IS_E1x(bp)) {
3940*4882a593Smuzhiyun 		pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3941*4882a593Smuzhiyun 		memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3942*4882a593Smuzhiyun 
3943*4882a593Smuzhiyun 		if (xmit_type & XMIT_CSUM_ENC) {
3944*4882a593Smuzhiyun 			u16 global_data = 0;
3945*4882a593Smuzhiyun 
3946*4882a593Smuzhiyun 			/* Set PBD in enc checksum offload case */
3947*4882a593Smuzhiyun 			hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3948*4882a593Smuzhiyun 						      &pbd_e2_parsing_data,
3949*4882a593Smuzhiyun 						      xmit_type);
3950*4882a593Smuzhiyun 
3951*4882a593Smuzhiyun 			/* turn on 2nd parsing and get a BD */
3952*4882a593Smuzhiyun 			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3953*4882a593Smuzhiyun 
3954*4882a593Smuzhiyun 			pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3955*4882a593Smuzhiyun 
3956*4882a593Smuzhiyun 			memset(pbd2, 0, sizeof(*pbd2));
3957*4882a593Smuzhiyun 
3958*4882a593Smuzhiyun 			pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3959*4882a593Smuzhiyun 				(skb_inner_network_header(skb) -
3960*4882a593Smuzhiyun 				 skb->data) >> 1;
3961*4882a593Smuzhiyun 
3962*4882a593Smuzhiyun 			if (xmit_type & XMIT_GSO_ENC)
3963*4882a593Smuzhiyun 				bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3964*4882a593Smuzhiyun 							  &global_data,
3965*4882a593Smuzhiyun 							  xmit_type);
3966*4882a593Smuzhiyun 
3967*4882a593Smuzhiyun 			pbd2->global_data = cpu_to_le16(global_data);
3968*4882a593Smuzhiyun 
3969*4882a593Smuzhiyun 			/* add an additional parse BD indication to the start BD */
3970*4882a593Smuzhiyun 			SET_FLAG(tx_start_bd->general_data,
3971*4882a593Smuzhiyun 				 ETH_TX_START_BD_PARSE_NBDS, 1);
3972*4882a593Smuzhiyun 			/* set encapsulation flag in start BD */
3973*4882a593Smuzhiyun 			SET_FLAG(tx_start_bd->general_data,
3974*4882a593Smuzhiyun 				 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3975*4882a593Smuzhiyun 
3976*4882a593Smuzhiyun 			tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3977*4882a593Smuzhiyun 
3978*4882a593Smuzhiyun 			nbd++;
3979*4882a593Smuzhiyun 		} else if (xmit_type & XMIT_CSUM) {
3980*4882a593Smuzhiyun 			/* Set PBD in checksum offload case w/o encapsulation */
3981*4882a593Smuzhiyun 			hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3982*4882a593Smuzhiyun 						     &pbd_e2_parsing_data,
3983*4882a593Smuzhiyun 						     xmit_type);
3984*4882a593Smuzhiyun 		}
3985*4882a593Smuzhiyun 
3986*4882a593Smuzhiyun 		bnx2x_set_ipv6_ext_e2(skb, &pbd_e2_parsing_data, xmit_type);
3987*4882a593Smuzhiyun 		/* Add the macs to the parsing BD if this is a vf or if
3988*4882a593Smuzhiyun 		 * Tx Switching is enabled.
3989*4882a593Smuzhiyun 		 */
3990*4882a593Smuzhiyun 		if (IS_VF(bp)) {
3991*4882a593Smuzhiyun 			/* override GRE parameters in BD */
3992*4882a593Smuzhiyun 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3993*4882a593Smuzhiyun 					      &pbd_e2->data.mac_addr.src_mid,
3994*4882a593Smuzhiyun 					      &pbd_e2->data.mac_addr.src_lo,
3995*4882a593Smuzhiyun 					      eth->h_source);
3996*4882a593Smuzhiyun 
3997*4882a593Smuzhiyun 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3998*4882a593Smuzhiyun 					      &pbd_e2->data.mac_addr.dst_mid,
3999*4882a593Smuzhiyun 					      &pbd_e2->data.mac_addr.dst_lo,
4000*4882a593Smuzhiyun 					      eth->h_dest);
4001*4882a593Smuzhiyun 		} else {
4002*4882a593Smuzhiyun 			if (bp->flags & TX_SWITCHING)
4003*4882a593Smuzhiyun 				bnx2x_set_fw_mac_addr(
4004*4882a593Smuzhiyun 						&pbd_e2->data.mac_addr.dst_hi,
4005*4882a593Smuzhiyun 						&pbd_e2->data.mac_addr.dst_mid,
4006*4882a593Smuzhiyun 						&pbd_e2->data.mac_addr.dst_lo,
4007*4882a593Smuzhiyun 						eth->h_dest);
4008*4882a593Smuzhiyun #ifdef BNX2X_STOP_ON_ERROR
4009*4882a593Smuzhiyun 			/* Enforce security is always set in Stop on Error -
4010*4882a593Smuzhiyun 			 * source mac should be present in the parsing BD
4011*4882a593Smuzhiyun 			 */
4012*4882a593Smuzhiyun 			bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
4013*4882a593Smuzhiyun 					      &pbd_e2->data.mac_addr.src_mid,
4014*4882a593Smuzhiyun 					      &pbd_e2->data.mac_addr.src_lo,
4015*4882a593Smuzhiyun 					      eth->h_source);
4016*4882a593Smuzhiyun #endif
4017*4882a593Smuzhiyun 		}
4018*4882a593Smuzhiyun 
4019*4882a593Smuzhiyun 		SET_FLAG(pbd_e2_parsing_data,
4020*4882a593Smuzhiyun 			 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
4021*4882a593Smuzhiyun 	} else {
4022*4882a593Smuzhiyun 		u16 global_data = 0;
4023*4882a593Smuzhiyun 		pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
4024*4882a593Smuzhiyun 		memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
4025*4882a593Smuzhiyun 		/* Set PBD in checksum offload case */
4026*4882a593Smuzhiyun 		if (xmit_type & XMIT_CSUM)
4027*4882a593Smuzhiyun 			hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
4028*4882a593Smuzhiyun 
4029*4882a593Smuzhiyun 		SET_FLAG(global_data,
4030*4882a593Smuzhiyun 			 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
4031*4882a593Smuzhiyun 		pbd_e1x->global_data |= cpu_to_le16(global_data);
4032*4882a593Smuzhiyun 	}
4033*4882a593Smuzhiyun 
4034*4882a593Smuzhiyun 	/* Setup the data pointer of the first BD of the packet */
4035*4882a593Smuzhiyun 	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4036*4882a593Smuzhiyun 	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4037*4882a593Smuzhiyun 	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
4038*4882a593Smuzhiyun 	pkt_size = tx_start_bd->nbytes;
4039*4882a593Smuzhiyun 
4040*4882a593Smuzhiyun 	DP(NETIF_MSG_TX_QUEUED,
4041*4882a593Smuzhiyun 	   "first bd @%p  addr (%x:%x)  nbytes %d  flags %x  vlan %x\n",
4042*4882a593Smuzhiyun 	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
4043*4882a593Smuzhiyun 	   le16_to_cpu(tx_start_bd->nbytes),
4044*4882a593Smuzhiyun 	   tx_start_bd->bd_flags.as_bitfield,
4045*4882a593Smuzhiyun 	   le16_to_cpu(tx_start_bd->vlan_or_ethertype));
4046*4882a593Smuzhiyun 
4047*4882a593Smuzhiyun 	if (xmit_type & XMIT_GSO) {
4048*4882a593Smuzhiyun 
4049*4882a593Smuzhiyun 		DP(NETIF_MSG_TX_QUEUED,
4050*4882a593Smuzhiyun 		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
4051*4882a593Smuzhiyun 		   skb->len, hlen, skb_headlen(skb),
4052*4882a593Smuzhiyun 		   skb_shinfo(skb)->gso_size);
4053*4882a593Smuzhiyun 
4054*4882a593Smuzhiyun 		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
4055*4882a593Smuzhiyun 
4056*4882a593Smuzhiyun 		if (unlikely(skb_headlen(skb) > hlen)) {
4057*4882a593Smuzhiyun 			nbd++;
4058*4882a593Smuzhiyun 			bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
4059*4882a593Smuzhiyun 						 &tx_start_bd, hlen,
4060*4882a593Smuzhiyun 						 bd_prod);
4061*4882a593Smuzhiyun 		}
4062*4882a593Smuzhiyun 		if (!CHIP_IS_E1x(bp))
4063*4882a593Smuzhiyun 			pbd_e2_parsing_data |=
4064*4882a593Smuzhiyun 				(skb_shinfo(skb)->gso_size <<
4065*4882a593Smuzhiyun 				 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
4066*4882a593Smuzhiyun 				 ETH_TX_PARSE_BD_E2_LSO_MSS;
4067*4882a593Smuzhiyun 		else
4068*4882a593Smuzhiyun 			bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
4069*4882a593Smuzhiyun 	}
4070*4882a593Smuzhiyun 
4071*4882a593Smuzhiyun 	/* Set the PBD's parsing_data field if not zero
4072*4882a593Smuzhiyun 	 * (for the chips newer than 57711).
4073*4882a593Smuzhiyun 	 */
4074*4882a593Smuzhiyun 	if (pbd_e2_parsing_data)
4075*4882a593Smuzhiyun 		pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
4076*4882a593Smuzhiyun 
4077*4882a593Smuzhiyun 	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
4078*4882a593Smuzhiyun 
4079*4882a593Smuzhiyun 	/* Handle fragmented skb */
4080*4882a593Smuzhiyun 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4081*4882a593Smuzhiyun 		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4082*4882a593Smuzhiyun 
4083*4882a593Smuzhiyun 		mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4084*4882a593Smuzhiyun 					   skb_frag_size(frag), DMA_TO_DEVICE);
4085*4882a593Smuzhiyun 		if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4086*4882a593Smuzhiyun 			unsigned int pkts_compl = 0, bytes_compl = 0;
4087*4882a593Smuzhiyun 
4088*4882a593Smuzhiyun 			DP(NETIF_MSG_TX_QUEUED,
4089*4882a593Smuzhiyun 			   "Unable to map page - dropping packet...\n");
4090*4882a593Smuzhiyun 
4091*4882a593Smuzhiyun 			/* we need to unmap all buffers already mapped
4092*4882a593Smuzhiyun 			 * for this SKB;
4093*4882a593Smuzhiyun 			 * first_bd->nbd needs to be properly updated
4094*4882a593Smuzhiyun 			 * before the call to bnx2x_free_tx_pkt
4095*4882a593Smuzhiyun 			 */
4096*4882a593Smuzhiyun 			first_bd->nbd = cpu_to_le16(nbd);
4097*4882a593Smuzhiyun 			bnx2x_free_tx_pkt(bp, txdata,
4098*4882a593Smuzhiyun 					  TX_BD(txdata->tx_pkt_prod),
4099*4882a593Smuzhiyun 					  &pkts_compl, &bytes_compl);
4100*4882a593Smuzhiyun 			return NETDEV_TX_OK;
4101*4882a593Smuzhiyun 		}
4102*4882a593Smuzhiyun 
4103*4882a593Smuzhiyun 		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4104*4882a593Smuzhiyun 		tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4105*4882a593Smuzhiyun 		if (total_pkt_bd == NULL)
4106*4882a593Smuzhiyun 			total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4107*4882a593Smuzhiyun 
4108*4882a593Smuzhiyun 		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4109*4882a593Smuzhiyun 		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4110*4882a593Smuzhiyun 		tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4111*4882a593Smuzhiyun 		le16_add_cpu(&pkt_size, skb_frag_size(frag));
4112*4882a593Smuzhiyun 		nbd++;
4113*4882a593Smuzhiyun 
4114*4882a593Smuzhiyun 		DP(NETIF_MSG_TX_QUEUED,
4115*4882a593Smuzhiyun 		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
4116*4882a593Smuzhiyun 		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4117*4882a593Smuzhiyun 		   le16_to_cpu(tx_data_bd->nbytes));
4118*4882a593Smuzhiyun 	}
4119*4882a593Smuzhiyun 
4120*4882a593Smuzhiyun 	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4121*4882a593Smuzhiyun 
4122*4882a593Smuzhiyun 	/* update with actual num BDs */
4123*4882a593Smuzhiyun 	first_bd->nbd = cpu_to_le16(nbd);
4124*4882a593Smuzhiyun 
4125*4882a593Smuzhiyun 	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4126*4882a593Smuzhiyun 
4127*4882a593Smuzhiyun 	/* now send a tx doorbell, counting the next BD
4128*4882a593Smuzhiyun 	 * if the packet contains or ends with it
4129*4882a593Smuzhiyun 	 */
4130*4882a593Smuzhiyun 	if (TX_BD_POFF(bd_prod) < nbd)
4131*4882a593Smuzhiyun 		nbd++;
4132*4882a593Smuzhiyun 
4133*4882a593Smuzhiyun 	/* total_pkt_bytes should be set on the first data BD if
4134*4882a593Smuzhiyun 	 * it's not an LSO packet and there is more than one
4135*4882a593Smuzhiyun 	 * data BD. In this case pkt_size is limited by an MTU value.
4136*4882a593Smuzhiyun 	 * However we prefer to set it for an LSO packet (while we don't
4137*4882a593Smuzhiyun 	 * have to) in order to save some CPU cycles in the non-LSO
4138*4882a593Smuzhiyun 	 * case, when we care much more about them.
4139*4882a593Smuzhiyun 	 */
4140*4882a593Smuzhiyun 	if (total_pkt_bd != NULL)
4141*4882a593Smuzhiyun 		total_pkt_bd->total_pkt_bytes = pkt_size;
4142*4882a593Smuzhiyun 
4143*4882a593Smuzhiyun 	if (pbd_e1x)
4144*4882a593Smuzhiyun 		DP(NETIF_MSG_TX_QUEUED,
4145*4882a593Smuzhiyun 		   "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
4146*4882a593Smuzhiyun 		   pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4147*4882a593Smuzhiyun 		   pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4148*4882a593Smuzhiyun 		   pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4149*4882a593Smuzhiyun 		    le16_to_cpu(pbd_e1x->total_hlen_w));
4150*4882a593Smuzhiyun 	if (pbd_e2)
4151*4882a593Smuzhiyun 		DP(NETIF_MSG_TX_QUEUED,
4152*4882a593Smuzhiyun 		   "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
4153*4882a593Smuzhiyun 		   pbd_e2,
4154*4882a593Smuzhiyun 		   pbd_e2->data.mac_addr.dst_hi,
4155*4882a593Smuzhiyun 		   pbd_e2->data.mac_addr.dst_mid,
4156*4882a593Smuzhiyun 		   pbd_e2->data.mac_addr.dst_lo,
4157*4882a593Smuzhiyun 		   pbd_e2->data.mac_addr.src_hi,
4158*4882a593Smuzhiyun 		   pbd_e2->data.mac_addr.src_mid,
4159*4882a593Smuzhiyun 		   pbd_e2->data.mac_addr.src_lo,
4160*4882a593Smuzhiyun 		   pbd_e2->parsing_data);
4161*4882a593Smuzhiyun 	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
4162*4882a593Smuzhiyun 
4163*4882a593Smuzhiyun 	netdev_tx_sent_queue(txq, skb->len);
4164*4882a593Smuzhiyun 
4165*4882a593Smuzhiyun 	skb_tx_timestamp(skb);
4166*4882a593Smuzhiyun 
4167*4882a593Smuzhiyun 	txdata->tx_pkt_prod++;
4168*4882a593Smuzhiyun 	/*
4169*4882a593Smuzhiyun 	 * Make sure that the BD data is updated before updating the producer
4170*4882a593Smuzhiyun 	 * since FW might read the BD right after the producer is updated.
4171*4882a593Smuzhiyun 	 * This is only applicable for weak-ordered memory model archs such
4172*4882a593Smuzhiyun 	 * as IA-64. The following barrier is also mandatory since the FW
4173*4882a593Smuzhiyun 	 * assumes packets must have BDs.
4174*4882a593Smuzhiyun 	 */
4175*4882a593Smuzhiyun 	wmb();
4176*4882a593Smuzhiyun 
4177*4882a593Smuzhiyun 	txdata->tx_db.data.prod += nbd;
4178*4882a593Smuzhiyun 	/* make sure descriptor update is observed by HW */
4179*4882a593Smuzhiyun 	wmb();
4180*4882a593Smuzhiyun 
4181*4882a593Smuzhiyun 	DOORBELL_RELAXED(bp, txdata->cid, txdata->tx_db.raw);
4182*4882a593Smuzhiyun 
4183*4882a593Smuzhiyun 	txdata->tx_bd_prod += nbd;
4184*4882a593Smuzhiyun 
4185*4882a593Smuzhiyun 	if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4186*4882a593Smuzhiyun 		netif_tx_stop_queue(txq);
4187*4882a593Smuzhiyun 
4188*4882a593Smuzhiyun 		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
4189*4882a593Smuzhiyun 		 * ordering of set_bit() in netif_tx_stop_queue() and read of
4190*4882a593Smuzhiyun 		 * fp->bd_tx_cons */
4191*4882a593Smuzhiyun 		smp_mb();
4192*4882a593Smuzhiyun 
4193*4882a593Smuzhiyun 		bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4194*4882a593Smuzhiyun 		if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4195*4882a593Smuzhiyun 			netif_tx_wake_queue(txq);
4196*4882a593Smuzhiyun 	}
4197*4882a593Smuzhiyun 	txdata->tx_pkt++;
4198*4882a593Smuzhiyun 
4199*4882a593Smuzhiyun 	return NETDEV_TX_OK;
4200*4882a593Smuzhiyun }
4201*4882a593Smuzhiyun 
4202*4882a593Smuzhiyun void bnx2x_get_c2s_mapping(struct bnx2x *bp, u8 *c2s_map, u8 *c2s_default)
4203*4882a593Smuzhiyun {
4204*4882a593Smuzhiyun 	int mfw_vn = BP_FW_MB_IDX(bp);
4205*4882a593Smuzhiyun 	u32 tmp;
4206*4882a593Smuzhiyun 
4207*4882a593Smuzhiyun 	/* If the shmem shouldn't affect configuration, use an identity map */
4208*4882a593Smuzhiyun 	if (!IS_MF_BD(bp)) {
4209*4882a593Smuzhiyun 		int i;
4210*4882a593Smuzhiyun 
4211*4882a593Smuzhiyun 		for (i = 0; i < BNX2X_MAX_PRIORITY; i++)
4212*4882a593Smuzhiyun 			c2s_map[i] = i;
4213*4882a593Smuzhiyun 		*c2s_default = 0;
4214*4882a593Smuzhiyun 
4215*4882a593Smuzhiyun 		return;
4216*4882a593Smuzhiyun 	}
4217*4882a593Smuzhiyun 
4218*4882a593Smuzhiyun 	tmp = SHMEM2_RD(bp, c2s_pcp_map_lower[mfw_vn]);
4219*4882a593Smuzhiyun 	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4220*4882a593Smuzhiyun 	c2s_map[0] = tmp & 0xff;
4221*4882a593Smuzhiyun 	c2s_map[1] = (tmp >> 8) & 0xff;
4222*4882a593Smuzhiyun 	c2s_map[2] = (tmp >> 16) & 0xff;
4223*4882a593Smuzhiyun 	c2s_map[3] = (tmp >> 24) & 0xff;
4224*4882a593Smuzhiyun 
4225*4882a593Smuzhiyun 	tmp = SHMEM2_RD(bp, c2s_pcp_map_upper[mfw_vn]);
4226*4882a593Smuzhiyun 	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4227*4882a593Smuzhiyun 	c2s_map[4] = tmp & 0xff;
4228*4882a593Smuzhiyun 	c2s_map[5] = (tmp >> 8) & 0xff;
4229*4882a593Smuzhiyun 	c2s_map[6] = (tmp >> 16) & 0xff;
4230*4882a593Smuzhiyun 	c2s_map[7] = (tmp >> 24) & 0xff;
4231*4882a593Smuzhiyun 
4232*4882a593Smuzhiyun 	tmp = SHMEM2_RD(bp, c2s_pcp_map_default[mfw_vn]);
4233*4882a593Smuzhiyun 	tmp = (__force u32)be32_to_cpu((__force __be32)tmp);
4234*4882a593Smuzhiyun 	*c2s_default = (tmp >> (8 * mfw_vn)) & 0xff;
4235*4882a593Smuzhiyun }
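
/* Mapping sketch (inferred from the reads above): c2s_pcp_map_lower and
 * c2s_pcp_map_upper each pack four 8-bit entries per MFW function,
 * giving the outer PCP for priorities 0-3 and 4-7 respectively after the
 * byte swap, while c2s_pcp_map_default holds one default byte per
 * function, selected here by mfw_vn.
 */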
4236*4882a593Smuzhiyun 
4237*4882a593Smuzhiyun /**
4238*4882a593Smuzhiyun  * bnx2x_setup_tc - routine to configure net_device for multi tc
4239*4882a593Smuzhiyun  *
4240*4882a593Smuzhiyun  * @dev: net device to configure
4241*4882a593Smuzhiyun  * @num_tc: number of traffic classes to enable
4242*4882a593Smuzhiyun  *
4243*4882a593Smuzhiyun  * callback connected to the ndo_setup_tc function pointer
4244*4882a593Smuzhiyun  */
4245*4882a593Smuzhiyun int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4246*4882a593Smuzhiyun {
4247*4882a593Smuzhiyun 	struct bnx2x *bp = netdev_priv(dev);
4248*4882a593Smuzhiyun 	u8 c2s_map[BNX2X_MAX_PRIORITY], c2s_def;
4249*4882a593Smuzhiyun 	int cos, prio, count, offset;
4250*4882a593Smuzhiyun 
4251*4882a593Smuzhiyun 	/* setup tc must be called under rtnl lock */
4252*4882a593Smuzhiyun 	ASSERT_RTNL();
4253*4882a593Smuzhiyun 
4254*4882a593Smuzhiyun 	/* no traffic classes requested. Aborting */
4255*4882a593Smuzhiyun 	if (!num_tc) {
4256*4882a593Smuzhiyun 		netdev_reset_tc(dev);
4257*4882a593Smuzhiyun 		return 0;
4258*4882a593Smuzhiyun 	}
4259*4882a593Smuzhiyun 
4260*4882a593Smuzhiyun 	/* requested to support too many traffic classes */
4261*4882a593Smuzhiyun 	if (num_tc > bp->max_cos) {
4262*4882a593Smuzhiyun 		BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4263*4882a593Smuzhiyun 			  num_tc, bp->max_cos);
4264*4882a593Smuzhiyun 		return -EINVAL;
4265*4882a593Smuzhiyun 	}
4266*4882a593Smuzhiyun 
4267*4882a593Smuzhiyun 	/* declare amount of supported traffic classes */
4268*4882a593Smuzhiyun 	if (netdev_set_num_tc(dev, num_tc)) {
4269*4882a593Smuzhiyun 		BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4270*4882a593Smuzhiyun 		return -EINVAL;
4271*4882a593Smuzhiyun 	}
4272*4882a593Smuzhiyun 
4273*4882a593Smuzhiyun 	bnx2x_get_c2s_mapping(bp, c2s_map, &c2s_def);
4274*4882a593Smuzhiyun 
4275*4882a593Smuzhiyun 	/* configure priority to traffic class mapping */
4276*4882a593Smuzhiyun 	for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4277*4882a593Smuzhiyun 		int outer_prio = c2s_map[prio];
4278*4882a593Smuzhiyun 
4279*4882a593Smuzhiyun 		netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[outer_prio]);
4280*4882a593Smuzhiyun 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4281*4882a593Smuzhiyun 		   "mapping priority %d to tc %d\n",
4282*4882a593Smuzhiyun 		   outer_prio, bp->prio_to_cos[outer_prio]);
4283*4882a593Smuzhiyun 	}
4284*4882a593Smuzhiyun 
4285*4882a593Smuzhiyun 	/* Use this configuration to differentiate tc0 from other COSes
4286*4882a593Smuzhiyun 	   This can be used for ets or pfc, and save the effort of setting
4287*4882a593Smuzhiyun 	   up a multi-class queue disc or negotiating DCBX with a switch
4288*4882a593Smuzhiyun 	netdev_set_prio_tc_map(dev, 0, 0);
4289*4882a593Smuzhiyun 	DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4290*4882a593Smuzhiyun 	for (prio = 1; prio < 16; prio++) {
4291*4882a593Smuzhiyun 		netdev_set_prio_tc_map(dev, prio, 1);
4292*4882a593Smuzhiyun 		DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4293*4882a593Smuzhiyun 	} */
4294*4882a593Smuzhiyun 
4295*4882a593Smuzhiyun 	/* configure traffic class to transmission queue mapping */
4296*4882a593Smuzhiyun 	for (cos = 0; cos < bp->max_cos; cos++) {
4297*4882a593Smuzhiyun 		count = BNX2X_NUM_ETH_QUEUES(bp);
4298*4882a593Smuzhiyun 		offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4299*4882a593Smuzhiyun 		netdev_set_tc_queue(dev, cos, count, offset);
4300*4882a593Smuzhiyun 		DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4301*4882a593Smuzhiyun 		   "mapping tc %d to offset %d count %d\n",
4302*4882a593Smuzhiyun 		   cos, offset, count);
4303*4882a593Smuzhiyun 	}
4304*4882a593Smuzhiyun 
4305*4882a593Smuzhiyun 	return 0;
4306*4882a593Smuzhiyun }
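
/* Queue layout example (illustrative): with 8 ethernet queues, no CNIC
 * queue and max_cos = 3, the loop above maps tc0 to txq 0-7, tc1 to
 * txq 8-15 and tc2 to txq 16-23, i.e. each traffic class gets its own
 * contiguous block of transmit queues behind the same fastpaths.
 */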
4307*4882a593Smuzhiyun 
4308*4882a593Smuzhiyun int __bnx2x_setup_tc(struct net_device *dev, enum tc_setup_type type,
4309*4882a593Smuzhiyun 		     void *type_data)
4310*4882a593Smuzhiyun {
4311*4882a593Smuzhiyun 	struct tc_mqprio_qopt *mqprio = type_data;
4312*4882a593Smuzhiyun 
4313*4882a593Smuzhiyun 	if (type != TC_SETUP_QDISC_MQPRIO)
4314*4882a593Smuzhiyun 		return -EOPNOTSUPP;
4315*4882a593Smuzhiyun 
4316*4882a593Smuzhiyun 	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
4317*4882a593Smuzhiyun 
4318*4882a593Smuzhiyun 	return bnx2x_setup_tc(dev, mqprio->num_tc);
4319*4882a593Smuzhiyun }
4320*4882a593Smuzhiyun 
4321*4882a593Smuzhiyun /* called with rtnl_lock */
4322*4882a593Smuzhiyun int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4323*4882a593Smuzhiyun {
4324*4882a593Smuzhiyun 	struct sockaddr *addr = p;
4325*4882a593Smuzhiyun 	struct bnx2x *bp = netdev_priv(dev);
4326*4882a593Smuzhiyun 	int rc = 0;
4327*4882a593Smuzhiyun 
4328*4882a593Smuzhiyun 	if (!is_valid_ether_addr(addr->sa_data)) {
4329*4882a593Smuzhiyun 		BNX2X_ERR("Requested MAC address is not valid\n");
4330*4882a593Smuzhiyun 		return -EINVAL;
4331*4882a593Smuzhiyun 	}
4332*4882a593Smuzhiyun 
4333*4882a593Smuzhiyun 	if (IS_MF_STORAGE_ONLY(bp)) {
4334*4882a593Smuzhiyun 		BNX2X_ERR("Can't change address on STORAGE ONLY function\n");
4335*4882a593Smuzhiyun 		return -EINVAL;
4336*4882a593Smuzhiyun 	}
4337*4882a593Smuzhiyun 
4338*4882a593Smuzhiyun 	if (netif_running(dev))  {
4339*4882a593Smuzhiyun 		rc = bnx2x_set_eth_mac(bp, false);
4340*4882a593Smuzhiyun 		if (rc)
4341*4882a593Smuzhiyun 			return rc;
4342*4882a593Smuzhiyun 	}
4343*4882a593Smuzhiyun 
4344*4882a593Smuzhiyun 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4345*4882a593Smuzhiyun 
4346*4882a593Smuzhiyun 	if (netif_running(dev))
4347*4882a593Smuzhiyun 		rc = bnx2x_set_eth_mac(bp, true);
4348*4882a593Smuzhiyun 
4349*4882a593Smuzhiyun 	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4350*4882a593Smuzhiyun 		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4351*4882a593Smuzhiyun 
4352*4882a593Smuzhiyun 	return rc;
4353*4882a593Smuzhiyun }
4354*4882a593Smuzhiyun 
4355*4882a593Smuzhiyun static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4356*4882a593Smuzhiyun {
4357*4882a593Smuzhiyun 	union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4358*4882a593Smuzhiyun 	struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4359*4882a593Smuzhiyun 	u8 cos;
4360*4882a593Smuzhiyun 
4361*4882a593Smuzhiyun 	/* Common */
4362*4882a593Smuzhiyun 
4363*4882a593Smuzhiyun 	if (IS_FCOE_IDX(fp_index)) {
4364*4882a593Smuzhiyun 		memset(sb, 0, sizeof(union host_hc_status_block));
4365*4882a593Smuzhiyun 		fp->status_blk_mapping = 0;
4366*4882a593Smuzhiyun 	} else {
4367*4882a593Smuzhiyun 		/* status blocks */
4368*4882a593Smuzhiyun 		if (!CHIP_IS_E1x(bp))
4369*4882a593Smuzhiyun 			BNX2X_PCI_FREE(sb->e2_sb,
4370*4882a593Smuzhiyun 				       bnx2x_fp(bp, fp_index,
4371*4882a593Smuzhiyun 						status_blk_mapping),
4372*4882a593Smuzhiyun 				       sizeof(struct host_hc_status_block_e2));
4373*4882a593Smuzhiyun 		else
4374*4882a593Smuzhiyun 			BNX2X_PCI_FREE(sb->e1x_sb,
4375*4882a593Smuzhiyun 				       bnx2x_fp(bp, fp_index,
4376*4882a593Smuzhiyun 						status_blk_mapping),
4377*4882a593Smuzhiyun 				       sizeof(struct host_hc_status_block_e1x));
4378*4882a593Smuzhiyun 	}
4379*4882a593Smuzhiyun 
4380*4882a593Smuzhiyun 	/* Rx */
4381*4882a593Smuzhiyun 	if (!skip_rx_queue(bp, fp_index)) {
4382*4882a593Smuzhiyun 		bnx2x_free_rx_bds(fp);
4383*4882a593Smuzhiyun 
4384*4882a593Smuzhiyun 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
4385*4882a593Smuzhiyun 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4386*4882a593Smuzhiyun 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4387*4882a593Smuzhiyun 			       bnx2x_fp(bp, fp_index, rx_desc_mapping),
4388*4882a593Smuzhiyun 			       sizeof(struct eth_rx_bd) * NUM_RX_BD);
4389*4882a593Smuzhiyun 
4390*4882a593Smuzhiyun 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4391*4882a593Smuzhiyun 			       bnx2x_fp(bp, fp_index, rx_comp_mapping),
4392*4882a593Smuzhiyun 			       sizeof(struct eth_fast_path_rx_cqe) *
4393*4882a593Smuzhiyun 			       NUM_RCQ_BD);
4394*4882a593Smuzhiyun 
4395*4882a593Smuzhiyun 		/* SGE ring */
4396*4882a593Smuzhiyun 		BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4397*4882a593Smuzhiyun 		BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4398*4882a593Smuzhiyun 			       bnx2x_fp(bp, fp_index, rx_sge_mapping),
4399*4882a593Smuzhiyun 			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4400*4882a593Smuzhiyun 	}
4401*4882a593Smuzhiyun 
4402*4882a593Smuzhiyun 	/* Tx */
4403*4882a593Smuzhiyun 	if (!skip_tx_queue(bp, fp_index)) {
4404*4882a593Smuzhiyun 		/* fastpath tx rings: tx_buf tx_desc */
4405*4882a593Smuzhiyun 		for_each_cos_in_tx_queue(fp, cos) {
4406*4882a593Smuzhiyun 			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4407*4882a593Smuzhiyun 
4408*4882a593Smuzhiyun 			DP(NETIF_MSG_IFDOWN,
4409*4882a593Smuzhiyun 			   "freeing tx memory of fp %d cos %d cid %d\n",
4410*4882a593Smuzhiyun 			   fp_index, cos, txdata->cid);
4411*4882a593Smuzhiyun 
4412*4882a593Smuzhiyun 			BNX2X_FREE(txdata->tx_buf_ring);
4413*4882a593Smuzhiyun 			BNX2X_PCI_FREE(txdata->tx_desc_ring,
4414*4882a593Smuzhiyun 				txdata->tx_desc_mapping,
4415*4882a593Smuzhiyun 				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4416*4882a593Smuzhiyun 		}
4417*4882a593Smuzhiyun 	}
4418*4882a593Smuzhiyun 	/* end of fastpath */
4419*4882a593Smuzhiyun }
4420*4882a593Smuzhiyun 
bnx2x_free_fp_mem_cnic(struct bnx2x * bp)4421*4882a593Smuzhiyun static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4422*4882a593Smuzhiyun {
4423*4882a593Smuzhiyun 	int i;
4424*4882a593Smuzhiyun 	for_each_cnic_queue(bp, i)
4425*4882a593Smuzhiyun 		bnx2x_free_fp_mem_at(bp, i);
4426*4882a593Smuzhiyun }
4427*4882a593Smuzhiyun 
bnx2x_free_fp_mem(struct bnx2x * bp)4428*4882a593Smuzhiyun void bnx2x_free_fp_mem(struct bnx2x *bp)
4429*4882a593Smuzhiyun {
4430*4882a593Smuzhiyun 	int i;
4431*4882a593Smuzhiyun 	for_each_eth_queue(bp, i)
4432*4882a593Smuzhiyun 		bnx2x_free_fp_mem_at(bp, i);
4433*4882a593Smuzhiyun }
4434*4882a593Smuzhiyun 
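/* Cache pointers to the status block's index_values and running_index arrays
 * in the fastpath structure, using the E2 or E1x SB layout depending on the
 * chip, so the hot path does not have to re-derive them.
 */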
set_sb_shortcuts(struct bnx2x * bp,int index)4435*4882a593Smuzhiyun static void set_sb_shortcuts(struct bnx2x *bp, int index)
4436*4882a593Smuzhiyun {
4437*4882a593Smuzhiyun 	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4438*4882a593Smuzhiyun 	if (!CHIP_IS_E1x(bp)) {
4439*4882a593Smuzhiyun 		bnx2x_fp(bp, index, sb_index_values) =
4440*4882a593Smuzhiyun 			(__le16 *)status_blk.e2_sb->sb.index_values;
4441*4882a593Smuzhiyun 		bnx2x_fp(bp, index, sb_running_index) =
4442*4882a593Smuzhiyun 			(__le16 *)status_blk.e2_sb->sb.running_index;
4443*4882a593Smuzhiyun 	} else {
4444*4882a593Smuzhiyun 		bnx2x_fp(bp, index, sb_index_values) =
4445*4882a593Smuzhiyun 			(__le16 *)status_blk.e1x_sb->sb.index_values;
4446*4882a593Smuzhiyun 		bnx2x_fp(bp, index, sb_running_index) =
4447*4882a593Smuzhiyun 			(__le16 *)status_blk.e1x_sb->sb.running_index;
4448*4882a593Smuzhiyun 	}
4449*4882a593Smuzhiyun }
4450*4882a593Smuzhiyun 
4451*4882a593Smuzhiyun /* Returns the number of actually allocated BDs */
bnx2x_alloc_rx_bds(struct bnx2x_fastpath * fp,int rx_ring_size)4452*4882a593Smuzhiyun static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4453*4882a593Smuzhiyun 			      int rx_ring_size)
4454*4882a593Smuzhiyun {
4455*4882a593Smuzhiyun 	struct bnx2x *bp = fp->bp;
4456*4882a593Smuzhiyun 	u16 ring_prod, cqe_ring_prod;
4457*4882a593Smuzhiyun 	int i, failure_cnt = 0;
4458*4882a593Smuzhiyun 
4459*4882a593Smuzhiyun 	fp->rx_comp_cons = 0;
4460*4882a593Smuzhiyun 	cqe_ring_prod = ring_prod = 0;
4461*4882a593Smuzhiyun 
4462*4882a593Smuzhiyun 	/* This routine is called only during fp init, so
4463*4882a593Smuzhiyun 	 * fp->eth_q_stats.rx_skb_alloc_failed is still 0 here.
4464*4882a593Smuzhiyun 	 */
4465*4882a593Smuzhiyun 	for (i = 0; i < rx_ring_size; i++) {
4466*4882a593Smuzhiyun 		if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4467*4882a593Smuzhiyun 			failure_cnt++;
4468*4882a593Smuzhiyun 			continue;
4469*4882a593Smuzhiyun 		}
4470*4882a593Smuzhiyun 		ring_prod = NEXT_RX_IDX(ring_prod);
4471*4882a593Smuzhiyun 		cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4472*4882a593Smuzhiyun 		WARN_ON(ring_prod <= (i - failure_cnt));
4473*4882a593Smuzhiyun 	}
4474*4882a593Smuzhiyun 
4475*4882a593Smuzhiyun 	if (failure_cnt)
4476*4882a593Smuzhiyun 		BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4477*4882a593Smuzhiyun 			  i - failure_cnt, fp->index);
4478*4882a593Smuzhiyun 
4479*4882a593Smuzhiyun 	fp->rx_bd_prod = ring_prod;
4480*4882a593Smuzhiyun 	/* Limit the CQE producer by the CQE ring size */
4481*4882a593Smuzhiyun 	fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4482*4882a593Smuzhiyun 			       cqe_ring_prod);
4483*4882a593Smuzhiyun 
4484*4882a593Smuzhiyun 	bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4485*4882a593Smuzhiyun 
4486*4882a593Smuzhiyun 	return i - failure_cnt;
4487*4882a593Smuzhiyun }
4488*4882a593Smuzhiyun 
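/* Chain the RCQ pages: the last CQE of every page is turned into a
 * "next page" descriptor pointing at the following page, wrapping back to the
 * first page after the last one.
 */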
bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath * fp)4489*4882a593Smuzhiyun static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4490*4882a593Smuzhiyun {
4491*4882a593Smuzhiyun 	int i;
4492*4882a593Smuzhiyun 
4493*4882a593Smuzhiyun 	for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4494*4882a593Smuzhiyun 		struct eth_rx_cqe_next_page *nextpg;
4495*4882a593Smuzhiyun 
4496*4882a593Smuzhiyun 		nextpg = (struct eth_rx_cqe_next_page *)
4497*4882a593Smuzhiyun 			&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4498*4882a593Smuzhiyun 		nextpg->addr_hi =
4499*4882a593Smuzhiyun 			cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4500*4882a593Smuzhiyun 				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4501*4882a593Smuzhiyun 		nextpg->addr_lo =
4502*4882a593Smuzhiyun 			cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4503*4882a593Smuzhiyun 				   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4504*4882a593Smuzhiyun 	}
4505*4882a593Smuzhiyun }
4506*4882a593Smuzhiyun 
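/* Allocate all per-queue memory for fastpath 'index': the status block
 * (skipped for FCoE, which uses the default SB), the per-CoS Tx buffer and
 * descriptor rings, and the Rx buffer, descriptor, completion and SGE rings.
 * The Rx ring size is taken from bp->rx_ring_size when set, otherwise derived
 * from the number of queues and clamped to the FW minimum.
 */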
bnx2x_alloc_fp_mem_at(struct bnx2x * bp,int index)4507*4882a593Smuzhiyun static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4508*4882a593Smuzhiyun {
4509*4882a593Smuzhiyun 	union host_hc_status_block *sb;
4510*4882a593Smuzhiyun 	struct bnx2x_fastpath *fp = &bp->fp[index];
4511*4882a593Smuzhiyun 	int ring_size = 0;
4512*4882a593Smuzhiyun 	u8 cos;
4513*4882a593Smuzhiyun 	int rx_ring_size = 0;
4514*4882a593Smuzhiyun 
4515*4882a593Smuzhiyun 	if (!bp->rx_ring_size && IS_MF_STORAGE_ONLY(bp)) {
4516*4882a593Smuzhiyun 		rx_ring_size = MIN_RX_SIZE_NONTPA;
4517*4882a593Smuzhiyun 		bp->rx_ring_size = rx_ring_size;
4518*4882a593Smuzhiyun 	} else if (!bp->rx_ring_size) {
4519*4882a593Smuzhiyun 		rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4520*4882a593Smuzhiyun 
4521*4882a593Smuzhiyun 		if (CHIP_IS_E3(bp)) {
4522*4882a593Smuzhiyun 			u32 cfg = SHMEM_RD(bp,
4523*4882a593Smuzhiyun 					   dev_info.port_hw_config[BP_PORT(bp)].
4524*4882a593Smuzhiyun 					   default_cfg);
4525*4882a593Smuzhiyun 
4526*4882a593Smuzhiyun 			/* Decrease ring size for 1G functions */
4527*4882a593Smuzhiyun 			if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4528*4882a593Smuzhiyun 			    PORT_HW_CFG_NET_SERDES_IF_SGMII)
4529*4882a593Smuzhiyun 				rx_ring_size /= 10;
4530*4882a593Smuzhiyun 		}
4531*4882a593Smuzhiyun 
4532*4882a593Smuzhiyun 		/* allocate at least the number of buffers required by the FW */
4533*4882a593Smuzhiyun 		rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4534*4882a593Smuzhiyun 				     MIN_RX_SIZE_TPA, rx_ring_size);
4535*4882a593Smuzhiyun 
4536*4882a593Smuzhiyun 		bp->rx_ring_size = rx_ring_size;
4537*4882a593Smuzhiyun 	} else /* if rx_ring_size specified - use it */
4538*4882a593Smuzhiyun 		rx_ring_size = bp->rx_ring_size;
4539*4882a593Smuzhiyun 
4540*4882a593Smuzhiyun 	DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4541*4882a593Smuzhiyun 
4542*4882a593Smuzhiyun 	/* Common */
4543*4882a593Smuzhiyun 	sb = &bnx2x_fp(bp, index, status_blk);
4544*4882a593Smuzhiyun 
4545*4882a593Smuzhiyun 	if (!IS_FCOE_IDX(index)) {
4546*4882a593Smuzhiyun 		/* status blocks */
4547*4882a593Smuzhiyun 		if (!CHIP_IS_E1x(bp)) {
4548*4882a593Smuzhiyun 			sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4549*4882a593Smuzhiyun 						    sizeof(struct host_hc_status_block_e2));
4550*4882a593Smuzhiyun 			if (!sb->e2_sb)
4551*4882a593Smuzhiyun 				goto alloc_mem_err;
4552*4882a593Smuzhiyun 		} else {
4553*4882a593Smuzhiyun 			sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4554*4882a593Smuzhiyun 						     sizeof(struct host_hc_status_block_e1x));
4555*4882a593Smuzhiyun 			if (!sb->e1x_sb)
4556*4882a593Smuzhiyun 				goto alloc_mem_err;
4557*4882a593Smuzhiyun 		}
4558*4882a593Smuzhiyun 	}
4559*4882a593Smuzhiyun 
4560*4882a593Smuzhiyun 	/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4561*4882a593Smuzhiyun 	 * set shortcuts for it.
4562*4882a593Smuzhiyun 	 */
4563*4882a593Smuzhiyun 	if (!IS_FCOE_IDX(index))
4564*4882a593Smuzhiyun 		set_sb_shortcuts(bp, index);
4565*4882a593Smuzhiyun 
4566*4882a593Smuzhiyun 	/* Tx */
4567*4882a593Smuzhiyun 	if (!skip_tx_queue(bp, index)) {
4568*4882a593Smuzhiyun 		/* fastpath tx rings: tx_buf tx_desc */
4569*4882a593Smuzhiyun 		for_each_cos_in_tx_queue(fp, cos) {
4570*4882a593Smuzhiyun 			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4571*4882a593Smuzhiyun 
4572*4882a593Smuzhiyun 			DP(NETIF_MSG_IFUP,
4573*4882a593Smuzhiyun 			   "allocating tx memory of fp %d cos %d\n",
4574*4882a593Smuzhiyun 			   index, cos);
4575*4882a593Smuzhiyun 
4576*4882a593Smuzhiyun 			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
4577*4882a593Smuzhiyun 						      sizeof(struct sw_tx_bd),
4578*4882a593Smuzhiyun 						      GFP_KERNEL);
4579*4882a593Smuzhiyun 			if (!txdata->tx_buf_ring)
4580*4882a593Smuzhiyun 				goto alloc_mem_err;
4581*4882a593Smuzhiyun 			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
4582*4882a593Smuzhiyun 							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4583*4882a593Smuzhiyun 			if (!txdata->tx_desc_ring)
4584*4882a593Smuzhiyun 				goto alloc_mem_err;
4585*4882a593Smuzhiyun 		}
4586*4882a593Smuzhiyun 	}
4587*4882a593Smuzhiyun 
4588*4882a593Smuzhiyun 	/* Rx */
4589*4882a593Smuzhiyun 	if (!skip_rx_queue(bp, index)) {
4590*4882a593Smuzhiyun 		/* fastpath rx rings: rx_buf rx_desc rx_comp */
4591*4882a593Smuzhiyun 		bnx2x_fp(bp, index, rx_buf_ring) =
4592*4882a593Smuzhiyun 			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
4593*4882a593Smuzhiyun 		if (!bnx2x_fp(bp, index, rx_buf_ring))
4594*4882a593Smuzhiyun 			goto alloc_mem_err;
4595*4882a593Smuzhiyun 		bnx2x_fp(bp, index, rx_desc_ring) =
4596*4882a593Smuzhiyun 			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
4597*4882a593Smuzhiyun 					sizeof(struct eth_rx_bd) * NUM_RX_BD);
4598*4882a593Smuzhiyun 		if (!bnx2x_fp(bp, index, rx_desc_ring))
4599*4882a593Smuzhiyun 			goto alloc_mem_err;
4600*4882a593Smuzhiyun 
4601*4882a593Smuzhiyun 		/* Seed all CQEs by 1s */
4602*4882a593Smuzhiyun 		bnx2x_fp(bp, index, rx_comp_ring) =
4603*4882a593Smuzhiyun 			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
4604*4882a593Smuzhiyun 					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
4605*4882a593Smuzhiyun 		if (!bnx2x_fp(bp, index, rx_comp_ring))
4606*4882a593Smuzhiyun 			goto alloc_mem_err;
4607*4882a593Smuzhiyun 
4608*4882a593Smuzhiyun 		/* SGE ring */
4609*4882a593Smuzhiyun 		bnx2x_fp(bp, index, rx_page_ring) =
4610*4882a593Smuzhiyun 			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
4611*4882a593Smuzhiyun 				GFP_KERNEL);
4612*4882a593Smuzhiyun 		if (!bnx2x_fp(bp, index, rx_page_ring))
4613*4882a593Smuzhiyun 			goto alloc_mem_err;
4614*4882a593Smuzhiyun 		bnx2x_fp(bp, index, rx_sge_ring) =
4615*4882a593Smuzhiyun 			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
4616*4882a593Smuzhiyun 					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4617*4882a593Smuzhiyun 		if (!bnx2x_fp(bp, index, rx_sge_ring))
4618*4882a593Smuzhiyun 			goto alloc_mem_err;
4619*4882a593Smuzhiyun 		/* RX BD ring */
4620*4882a593Smuzhiyun 		bnx2x_set_next_page_rx_bd(fp);
4621*4882a593Smuzhiyun 
4622*4882a593Smuzhiyun 		/* CQ ring */
4623*4882a593Smuzhiyun 		bnx2x_set_next_page_rx_cq(fp);
4624*4882a593Smuzhiyun 
4625*4882a593Smuzhiyun 		/* BDs */
4626*4882a593Smuzhiyun 		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
4627*4882a593Smuzhiyun 		if (ring_size < rx_ring_size)
4628*4882a593Smuzhiyun 			goto alloc_mem_err;
4629*4882a593Smuzhiyun 	}
4630*4882a593Smuzhiyun 
4631*4882a593Smuzhiyun 	return 0;
4632*4882a593Smuzhiyun 
4633*4882a593Smuzhiyun /* handles low memory cases */
4634*4882a593Smuzhiyun alloc_mem_err:
4635*4882a593Smuzhiyun 	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
4636*4882a593Smuzhiyun 						index, ring_size);
4637*4882a593Smuzhiyun 	/* FW will drop all packets if the queue is not big enough;
4638*4882a593Smuzhiyun 	 * in that case we disable the queue.
4639*4882a593Smuzhiyun 	 * The minimum size differs for OOO, TPA and non-TPA queues.
4640*4882a593Smuzhiyun 	 */
4641*4882a593Smuzhiyun 	if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
4642*4882a593Smuzhiyun 				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
4643*4882a593Smuzhiyun 			/* release memory allocated for this queue */
4644*4882a593Smuzhiyun 			bnx2x_free_fp_mem_at(bp, index);
4645*4882a593Smuzhiyun 			return -ENOMEM;
4646*4882a593Smuzhiyun 	}
4647*4882a593Smuzhiyun 	return 0;
4648*4882a593Smuzhiyun }
4649*4882a593Smuzhiyun 
bnx2x_alloc_fp_mem_cnic(struct bnx2x * bp)4650*4882a593Smuzhiyun static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
4651*4882a593Smuzhiyun {
4652*4882a593Smuzhiyun 	if (!NO_FCOE(bp))
4653*4882a593Smuzhiyun 		/* FCoE */
4654*4882a593Smuzhiyun 		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
4655*4882a593Smuzhiyun 			/* we fail the load process instead of marking
4656*4882a593Smuzhiyun 			 * NO_FCOE_FLAG
4657*4882a593Smuzhiyun 			 */
4658*4882a593Smuzhiyun 			return -ENOMEM;
4659*4882a593Smuzhiyun 
4660*4882a593Smuzhiyun 	return 0;
4661*4882a593Smuzhiyun }
4662*4882a593Smuzhiyun 
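/* Allocate fastpath memory for all ETH queues. A failure on the leading queue
 * is fatal; a failure on an RSS queue only shrinks the number of ETH queues.
 */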
bnx2x_alloc_fp_mem(struct bnx2x * bp)4663*4882a593Smuzhiyun static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
4664*4882a593Smuzhiyun {
4665*4882a593Smuzhiyun 	int i;
4666*4882a593Smuzhiyun 
4667*4882a593Smuzhiyun 	/* 1. Allocate the leading FP - fatal on error
4668*4882a593Smuzhiyun 	 * 2. Allocate the RSS FPs - reduce the number of queues on error
4669*4882a593Smuzhiyun 	 */
4670*4882a593Smuzhiyun 
4671*4882a593Smuzhiyun 	/* leading */
4672*4882a593Smuzhiyun 	if (bnx2x_alloc_fp_mem_at(bp, 0))
4673*4882a593Smuzhiyun 		return -ENOMEM;
4674*4882a593Smuzhiyun 
4675*4882a593Smuzhiyun 	/* RSS */
4676*4882a593Smuzhiyun 	for_each_nondefault_eth_queue(bp, i)
4677*4882a593Smuzhiyun 		if (bnx2x_alloc_fp_mem_at(bp, i))
4678*4882a593Smuzhiyun 			break;
4679*4882a593Smuzhiyun 
4680*4882a593Smuzhiyun 	/* handle memory failures */
4681*4882a593Smuzhiyun 	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
4682*4882a593Smuzhiyun 		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
4683*4882a593Smuzhiyun 
4684*4882a593Smuzhiyun 		WARN_ON(delta < 0);
4685*4882a593Smuzhiyun 		bnx2x_shrink_eth_fp(bp, delta);
4686*4882a593Smuzhiyun 		if (CNIC_SUPPORT(bp))
4687*4882a593Smuzhiyun 			/* move the non-eth FPs next to the last eth FP;
4688*4882a593Smuzhiyun 			 * this must be done in that order:
4689*4882a593Smuzhiyun 			 * FCOE_IDX < FWD_IDX < OOO_IDX
4690*4882a593Smuzhiyun 			 */
4691*4882a593Smuzhiyun 
4692*4882a593Smuzhiyun 			/* move the FCoE fp even if NO_FCOE_FLAG is on */
4693*4882a593Smuzhiyun 			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
4694*4882a593Smuzhiyun 		bp->num_ethernet_queues -= delta;
4695*4882a593Smuzhiyun 		bp->num_queues = bp->num_ethernet_queues +
4696*4882a593Smuzhiyun 				 bp->num_cnic_queues;
4697*4882a593Smuzhiyun 		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
4698*4882a593Smuzhiyun 			  bp->num_queues + delta, bp->num_queues);
4699*4882a593Smuzhiyun 	}
4700*4882a593Smuzhiyun 
4701*4882a593Smuzhiyun 	return 0;
4702*4882a593Smuzhiyun }
4703*4882a593Smuzhiyun 
bnx2x_free_mem_bp(struct bnx2x * bp)4704*4882a593Smuzhiyun void bnx2x_free_mem_bp(struct bnx2x *bp)
4705*4882a593Smuzhiyun {
4706*4882a593Smuzhiyun 	int i;
4707*4882a593Smuzhiyun 
4708*4882a593Smuzhiyun 	for (i = 0; i < bp->fp_array_size; i++)
4709*4882a593Smuzhiyun 		kfree(bp->fp[i].tpa_info);
4710*4882a593Smuzhiyun 	kfree(bp->fp);
4711*4882a593Smuzhiyun 	kfree(bp->sp_objs);
4712*4882a593Smuzhiyun 	kfree(bp->fp_stats);
4713*4882a593Smuzhiyun 	kfree(bp->bnx2x_txq);
4714*4882a593Smuzhiyun 	kfree(bp->msix_table);
4715*4882a593Smuzhiyun 	kfree(bp->ilt);
4716*4882a593Smuzhiyun }
4717*4882a593Smuzhiyun 
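/* Allocate the per-device bookkeeping arrays: the fastpath array (including
 * per-queue TPA info), slowpath objects, per-queue statistics, the Tx queue
 * array, the MSI-X table and the ILT. On any failure everything allocated so
 * far is released through bnx2x_free_mem_bp().
 */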
bnx2x_alloc_mem_bp(struct bnx2x * bp)4718*4882a593Smuzhiyun int bnx2x_alloc_mem_bp(struct bnx2x *bp)
4719*4882a593Smuzhiyun {
4720*4882a593Smuzhiyun 	struct bnx2x_fastpath *fp;
4721*4882a593Smuzhiyun 	struct msix_entry *tbl;
4722*4882a593Smuzhiyun 	struct bnx2x_ilt *ilt;
4723*4882a593Smuzhiyun 	int msix_table_size = 0;
4724*4882a593Smuzhiyun 	int fp_array_size, txq_array_size;
4725*4882a593Smuzhiyun 	int i;
4726*4882a593Smuzhiyun 
4727*4882a593Smuzhiyun 	/*
4728*4882a593Smuzhiyun 	 * The biggest MSI-X table we might need is the maximum number of fast
4729*4882a593Smuzhiyun 	 * path IGU SBs plus the default SB (for the PF only).
4730*4882a593Smuzhiyun 	 */
4731*4882a593Smuzhiyun 	msix_table_size = bp->igu_sb_cnt;
4732*4882a593Smuzhiyun 	if (IS_PF(bp))
4733*4882a593Smuzhiyun 		msix_table_size++;
4734*4882a593Smuzhiyun 	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);
4735*4882a593Smuzhiyun 
4736*4882a593Smuzhiyun 	/* fp array: RSS plus CNIC related L2 queues */
4737*4882a593Smuzhiyun 	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
4738*4882a593Smuzhiyun 	bp->fp_array_size = fp_array_size;
4739*4882a593Smuzhiyun 	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);
4740*4882a593Smuzhiyun 
4741*4882a593Smuzhiyun 	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
4742*4882a593Smuzhiyun 	if (!fp)
4743*4882a593Smuzhiyun 		goto alloc_err;
4744*4882a593Smuzhiyun 	for (i = 0; i < bp->fp_array_size; i++) {
4745*4882a593Smuzhiyun 		fp[i].tpa_info =
4746*4882a593Smuzhiyun 			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
4747*4882a593Smuzhiyun 				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
4748*4882a593Smuzhiyun 		if (!(fp[i].tpa_info))
4749*4882a593Smuzhiyun 			goto alloc_err;
4750*4882a593Smuzhiyun 	}
4751*4882a593Smuzhiyun 
4752*4882a593Smuzhiyun 	bp->fp = fp;
4753*4882a593Smuzhiyun 
4754*4882a593Smuzhiyun 	/* allocate sp objs */
4755*4882a593Smuzhiyun 	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
4756*4882a593Smuzhiyun 			      GFP_KERNEL);
4757*4882a593Smuzhiyun 	if (!bp->sp_objs)
4758*4882a593Smuzhiyun 		goto alloc_err;
4759*4882a593Smuzhiyun 
4760*4882a593Smuzhiyun 	/* allocate fp_stats */
4761*4882a593Smuzhiyun 	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
4762*4882a593Smuzhiyun 			       GFP_KERNEL);
4763*4882a593Smuzhiyun 	if (!bp->fp_stats)
4764*4882a593Smuzhiyun 		goto alloc_err;
4765*4882a593Smuzhiyun 
4766*4882a593Smuzhiyun 	/* Allocate memory for the transmission queues array */
4767*4882a593Smuzhiyun 	txq_array_size =
4768*4882a593Smuzhiyun 		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
4769*4882a593Smuzhiyun 	BNX2X_DEV_INFO("txq_array_size %d\n", txq_array_size);
4770*4882a593Smuzhiyun 
4771*4882a593Smuzhiyun 	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
4772*4882a593Smuzhiyun 				GFP_KERNEL);
4773*4882a593Smuzhiyun 	if (!bp->bnx2x_txq)
4774*4882a593Smuzhiyun 		goto alloc_err;
4775*4882a593Smuzhiyun 
4776*4882a593Smuzhiyun 	/* msix table */
4777*4882a593Smuzhiyun 	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
4778*4882a593Smuzhiyun 	if (!tbl)
4779*4882a593Smuzhiyun 		goto alloc_err;
4780*4882a593Smuzhiyun 	bp->msix_table = tbl;
4781*4882a593Smuzhiyun 
4782*4882a593Smuzhiyun 	/* ilt */
4783*4882a593Smuzhiyun 	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
4784*4882a593Smuzhiyun 	if (!ilt)
4785*4882a593Smuzhiyun 		goto alloc_err;
4786*4882a593Smuzhiyun 	bp->ilt = ilt;
4787*4882a593Smuzhiyun 
4788*4882a593Smuzhiyun 	return 0;
4789*4882a593Smuzhiyun alloc_err:
4790*4882a593Smuzhiyun 	bnx2x_free_mem_bp(bp);
4791*4882a593Smuzhiyun 	return -ENOMEM;
4792*4882a593Smuzhiyun }
4793*4882a593Smuzhiyun 
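/* Unload and then reload the NIC if the interface is currently running;
 * a no-op otherwise. Callers such as bnx2x_change_mtu() below hold rtnl_lock.
 */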
bnx2x_reload_if_running(struct net_device * dev)4794*4882a593Smuzhiyun int bnx2x_reload_if_running(struct net_device *dev)
4795*4882a593Smuzhiyun {
4796*4882a593Smuzhiyun 	struct bnx2x *bp = netdev_priv(dev);
4797*4882a593Smuzhiyun 
4798*4882a593Smuzhiyun 	if (unlikely(!netif_running(dev)))
4799*4882a593Smuzhiyun 		return 0;
4800*4882a593Smuzhiyun 
4801*4882a593Smuzhiyun 	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
4802*4882a593Smuzhiyun 	return bnx2x_nic_load(bp, LOAD_NORMAL);
4803*4882a593Smuzhiyun }
4804*4882a593Smuzhiyun 
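/* Return the index of the currently active PHY: INT_PHY for single-PHY
 * configurations, otherwise EXT_PHY1 or EXT_PHY2 depending on the link state
 * and the configured PHY selection.
 */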
bnx2x_get_cur_phy_idx(struct bnx2x * bp)4805*4882a593Smuzhiyun int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
4806*4882a593Smuzhiyun {
4807*4882a593Smuzhiyun 	u32 sel_phy_idx = 0;
4808*4882a593Smuzhiyun 	if (bp->link_params.num_phys <= 1)
4809*4882a593Smuzhiyun 		return INT_PHY;
4810*4882a593Smuzhiyun 
4811*4882a593Smuzhiyun 	if (bp->link_vars.link_up) {
4812*4882a593Smuzhiyun 		sel_phy_idx = EXT_PHY1;
4813*4882a593Smuzhiyun 		/* If the link is SERDES, check whether EXT_PHY2 is the active one */
4814*4882a593Smuzhiyun 		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
4815*4882a593Smuzhiyun 		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
4816*4882a593Smuzhiyun 			sel_phy_idx = EXT_PHY2;
4817*4882a593Smuzhiyun 	} else {
4818*4882a593Smuzhiyun 
4819*4882a593Smuzhiyun 		switch (bnx2x_phy_selection(&bp->link_params)) {
4820*4882a593Smuzhiyun 		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
4821*4882a593Smuzhiyun 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
4822*4882a593Smuzhiyun 		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
4823*4882a593Smuzhiyun 		       sel_phy_idx = EXT_PHY1;
4824*4882a593Smuzhiyun 		       break;
4825*4882a593Smuzhiyun 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
4826*4882a593Smuzhiyun 		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
4827*4882a593Smuzhiyun 		       sel_phy_idx = EXT_PHY2;
4828*4882a593Smuzhiyun 		       break;
4829*4882a593Smuzhiyun 		}
4830*4882a593Smuzhiyun 	}
4831*4882a593Smuzhiyun 
4832*4882a593Smuzhiyun 	return sel_phy_idx;
4833*4882a593Smuzhiyun }
bnx2x_get_link_cfg_idx(struct bnx2x * bp)4834*4882a593Smuzhiyun int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
4835*4882a593Smuzhiyun {
4836*4882a593Smuzhiyun 	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
4837*4882a593Smuzhiyun 	/*
4838*4882a593Smuzhiyun 	 * The selected active PHY index is always the one after swapping (when
4839*4882a593Smuzhiyun 	 * PHY swapping is enabled), so in that case we need to reverse the
4840*4882a593Smuzhiyun 	 * configuration index.
4841*4882a593Smuzhiyun 	 */
4842*4882a593Smuzhiyun 
4843*4882a593Smuzhiyun 	if (bp->link_params.multi_phy_config &
4844*4882a593Smuzhiyun 	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
4845*4882a593Smuzhiyun 		if (sel_phy_idx == EXT_PHY1)
4846*4882a593Smuzhiyun 			sel_phy_idx = EXT_PHY2;
4847*4882a593Smuzhiyun 		else if (sel_phy_idx == EXT_PHY2)
4848*4882a593Smuzhiyun 			sel_phy_idx = EXT_PHY1;
4849*4882a593Smuzhiyun 	}
4850*4882a593Smuzhiyun 	return LINK_CONFIG_IDX(sel_phy_idx);
4851*4882a593Smuzhiyun }
4852*4882a593Smuzhiyun 
4853*4882a593Smuzhiyun #ifdef NETDEV_FCOE_WWNN
bnx2x_fcoe_get_wwn(struct net_device * dev,u64 * wwn,int type)4854*4882a593Smuzhiyun int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
4855*4882a593Smuzhiyun {
4856*4882a593Smuzhiyun 	struct bnx2x *bp = netdev_priv(dev);
4857*4882a593Smuzhiyun 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
4858*4882a593Smuzhiyun 
4859*4882a593Smuzhiyun 	switch (type) {
4860*4882a593Smuzhiyun 	case NETDEV_FCOE_WWNN:
4861*4882a593Smuzhiyun 		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
4862*4882a593Smuzhiyun 				cp->fcoe_wwn_node_name_lo);
4863*4882a593Smuzhiyun 		break;
4864*4882a593Smuzhiyun 	case NETDEV_FCOE_WWPN:
4865*4882a593Smuzhiyun 		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
4866*4882a593Smuzhiyun 				cp->fcoe_wwn_port_name_lo);
4867*4882a593Smuzhiyun 		break;
4868*4882a593Smuzhiyun 	default:
4869*4882a593Smuzhiyun 		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
4870*4882a593Smuzhiyun 		return -EINVAL;
4871*4882a593Smuzhiyun 	}
4872*4882a593Smuzhiyun 
4873*4882a593Smuzhiyun 	return 0;
4874*4882a593Smuzhiyun }
4875*4882a593Smuzhiyun #endif
4876*4882a593Smuzhiyun 
4877*4882a593Smuzhiyun /* called with rtnl_lock */
bnx2x_change_mtu(struct net_device * dev,int new_mtu)4878*4882a593Smuzhiyun int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
4879*4882a593Smuzhiyun {
4880*4882a593Smuzhiyun 	struct bnx2x *bp = netdev_priv(dev);
4881*4882a593Smuzhiyun 
4882*4882a593Smuzhiyun 	if (pci_num_vf(bp->pdev)) {
4883*4882a593Smuzhiyun 		DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
4884*4882a593Smuzhiyun 		return -EPERM;
4885*4882a593Smuzhiyun 	}
4886*4882a593Smuzhiyun 
4887*4882a593Smuzhiyun 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
4888*4882a593Smuzhiyun 		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
4889*4882a593Smuzhiyun 		return -EAGAIN;
4890*4882a593Smuzhiyun 	}
4891*4882a593Smuzhiyun 
4892*4882a593Smuzhiyun 	/* This does not race with packet allocation
4893*4882a593Smuzhiyun 	 * because the actual allocation size is
4894*4882a593Smuzhiyun 	 * only updated as part of the load process.
4895*4882a593Smuzhiyun 	 */
4896*4882a593Smuzhiyun 	dev->mtu = new_mtu;
4897*4882a593Smuzhiyun 
4898*4882a593Smuzhiyun 	if (!bnx2x_mtu_allows_gro(new_mtu))
4899*4882a593Smuzhiyun 		dev->features &= ~NETIF_F_GRO_HW;
4900*4882a593Smuzhiyun 
4901*4882a593Smuzhiyun 	if (IS_PF(bp) && SHMEM2_HAS(bp, curr_cfg))
4902*4882a593Smuzhiyun 		SHMEM2_WR(bp, curr_cfg, CURR_CFG_MET_OS);
4903*4882a593Smuzhiyun 
4904*4882a593Smuzhiyun 	return bnx2x_reload_if_running(dev);
4905*4882a593Smuzhiyun }
4906*4882a593Smuzhiyun 
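/* ndo_fix_features callback: drop feature bits that cannot be honoured in the
 * current configuration (LRO/HW GRO without Rx checksumming, HW GRO with an
 * MTU too large for GRO) and, when VFs are enabled, revert changes that would
 * require an internal reload of the PF.
 */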
bnx2x_fix_features(struct net_device * dev,netdev_features_t features)4907*4882a593Smuzhiyun netdev_features_t bnx2x_fix_features(struct net_device *dev,
4908*4882a593Smuzhiyun 				     netdev_features_t features)
4909*4882a593Smuzhiyun {
4910*4882a593Smuzhiyun 	struct bnx2x *bp = netdev_priv(dev);
4911*4882a593Smuzhiyun 
4912*4882a593Smuzhiyun 	if (pci_num_vf(bp->pdev)) {
4913*4882a593Smuzhiyun 		netdev_features_t changed = dev->features ^ features;
4914*4882a593Smuzhiyun 
4915*4882a593Smuzhiyun 		/* Revert the requested changes in features if they
4916*4882a593Smuzhiyun 		 * would require internal reload of PF in bnx2x_set_features().
4917*4882a593Smuzhiyun 		 */
4918*4882a593Smuzhiyun 		if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
4919*4882a593Smuzhiyun 			features &= ~NETIF_F_RXCSUM;
4920*4882a593Smuzhiyun 			features |= dev->features & NETIF_F_RXCSUM;
4921*4882a593Smuzhiyun 		}
4922*4882a593Smuzhiyun 
4923*4882a593Smuzhiyun 		if (changed & NETIF_F_LOOPBACK) {
4924*4882a593Smuzhiyun 			features &= ~NETIF_F_LOOPBACK;
4925*4882a593Smuzhiyun 			features |= dev->features & NETIF_F_LOOPBACK;
4926*4882a593Smuzhiyun 		}
4927*4882a593Smuzhiyun 	}
4928*4882a593Smuzhiyun 
4929*4882a593Smuzhiyun 	/* TPA requires Rx CSUM offloading */
4930*4882a593Smuzhiyun 	if (!(features & NETIF_F_RXCSUM))
4931*4882a593Smuzhiyun 		features &= ~NETIF_F_LRO;
4932*4882a593Smuzhiyun 
4933*4882a593Smuzhiyun 	if (!(features & NETIF_F_GRO) || !bnx2x_mtu_allows_gro(dev->mtu))
4934*4882a593Smuzhiyun 		features &= ~NETIF_F_GRO_HW;
4935*4882a593Smuzhiyun 	if (features & NETIF_F_GRO_HW)
4936*4882a593Smuzhiyun 		features &= ~NETIF_F_LRO;
4937*4882a593Smuzhiyun 
4938*4882a593Smuzhiyun 	return features;
4939*4882a593Smuzhiyun }
4940*4882a593Smuzhiyun 
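/* ndo_set_features callback: apply the requested feature set, switching the
 * loopback mode if needed, and reload the NIC when any relevant bit (anything
 * but GRO) changed and no parity recovery is in progress.
 */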
bnx2x_set_features(struct net_device * dev,netdev_features_t features)4941*4882a593Smuzhiyun int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
4942*4882a593Smuzhiyun {
4943*4882a593Smuzhiyun 	struct bnx2x *bp = netdev_priv(dev);
4944*4882a593Smuzhiyun 	netdev_features_t changes = features ^ dev->features;
4945*4882a593Smuzhiyun 	bool bnx2x_reload = false;
4946*4882a593Smuzhiyun 	int rc;
4947*4882a593Smuzhiyun 
4948*4882a593Smuzhiyun 	/* VFs or non-SRIOV PFs should be able to change the loopback feature */
4949*4882a593Smuzhiyun 	if (!pci_num_vf(bp->pdev)) {
4950*4882a593Smuzhiyun 		if (features & NETIF_F_LOOPBACK) {
4951*4882a593Smuzhiyun 			if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
4952*4882a593Smuzhiyun 				bp->link_params.loopback_mode = LOOPBACK_BMAC;
4953*4882a593Smuzhiyun 				bnx2x_reload = true;
4954*4882a593Smuzhiyun 			}
4955*4882a593Smuzhiyun 		} else {
4956*4882a593Smuzhiyun 			if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
4957*4882a593Smuzhiyun 				bp->link_params.loopback_mode = LOOPBACK_NONE;
4958*4882a593Smuzhiyun 				bnx2x_reload = true;
4959*4882a593Smuzhiyun 			}
4960*4882a593Smuzhiyun 		}
4961*4882a593Smuzhiyun 	}
4962*4882a593Smuzhiyun 
4963*4882a593Smuzhiyun 	/* Don't care about GRO changes */
4964*4882a593Smuzhiyun 	changes &= ~NETIF_F_GRO;
4965*4882a593Smuzhiyun 
4966*4882a593Smuzhiyun 	if (changes)
4967*4882a593Smuzhiyun 		bnx2x_reload = true;
4968*4882a593Smuzhiyun 
4969*4882a593Smuzhiyun 	if (bnx2x_reload) {
4970*4882a593Smuzhiyun 		if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
4971*4882a593Smuzhiyun 			dev->features = features;
4972*4882a593Smuzhiyun 			rc = bnx2x_reload_if_running(dev);
4973*4882a593Smuzhiyun 			return rc ? rc : 1;
4974*4882a593Smuzhiyun 		}
4975*4882a593Smuzhiyun 		/* else: bnx2x_nic_load() will be called at end of recovery */
4976*4882a593Smuzhiyun 	}
4977*4882a593Smuzhiyun 
4978*4882a593Smuzhiyun 	return 0;
4979*4882a593Smuzhiyun }
4980*4882a593Smuzhiyun 
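/* ndo_tx_timeout callback: log a dump of the device state for debugging
 * (or panic when BNX2X_STOP_ON_ERROR is set) and schedule a reset through the
 * sp_rtnl task.
 */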
bnx2x_tx_timeout(struct net_device * dev,unsigned int txqueue)4981*4882a593Smuzhiyun void bnx2x_tx_timeout(struct net_device *dev, unsigned int txqueue)
4982*4882a593Smuzhiyun {
4983*4882a593Smuzhiyun 	struct bnx2x *bp = netdev_priv(dev);
4984*4882a593Smuzhiyun 
4985*4882a593Smuzhiyun 	/* We want the dump information logged,
4986*4882a593Smuzhiyun 	 * but calling bnx2x_panic() would kill all chances of recovery.
4987*4882a593Smuzhiyun 	 */
4988*4882a593Smuzhiyun 	if (!bp->panic)
4989*4882a593Smuzhiyun #ifndef BNX2X_STOP_ON_ERROR
4990*4882a593Smuzhiyun 		bnx2x_panic_dump(bp, false);
4991*4882a593Smuzhiyun #else
4992*4882a593Smuzhiyun 		bnx2x_panic();
4993*4882a593Smuzhiyun #endif
4994*4882a593Smuzhiyun 
4995*4882a593Smuzhiyun 	/* This allows the netif to be shut down gracefully before resetting */
4996*4882a593Smuzhiyun 	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
4997*4882a593Smuzhiyun }
4998*4882a593Smuzhiyun 
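/* PM suspend handler: under rtnl_lock, detach the netdev and unload the NIC
 * with UNLOAD_CLOSE if the interface is running.
 */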
bnx2x_suspend(struct device * dev_d)4999*4882a593Smuzhiyun static int __maybe_unused bnx2x_suspend(struct device *dev_d)
5000*4882a593Smuzhiyun {
5001*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(dev_d);
5002*4882a593Smuzhiyun 	struct net_device *dev = pci_get_drvdata(pdev);
5003*4882a593Smuzhiyun 	struct bnx2x *bp;
5004*4882a593Smuzhiyun 
5005*4882a593Smuzhiyun 	if (!dev) {
5006*4882a593Smuzhiyun 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5007*4882a593Smuzhiyun 		return -ENODEV;
5008*4882a593Smuzhiyun 	}
5009*4882a593Smuzhiyun 	bp = netdev_priv(dev);
5010*4882a593Smuzhiyun 
5011*4882a593Smuzhiyun 	rtnl_lock();
5012*4882a593Smuzhiyun 
5013*4882a593Smuzhiyun 	if (!netif_running(dev)) {
5014*4882a593Smuzhiyun 		rtnl_unlock();
5015*4882a593Smuzhiyun 		return 0;
5016*4882a593Smuzhiyun 	}
5017*4882a593Smuzhiyun 
5018*4882a593Smuzhiyun 	netif_device_detach(dev);
5019*4882a593Smuzhiyun 
5020*4882a593Smuzhiyun 	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
5021*4882a593Smuzhiyun 
5022*4882a593Smuzhiyun 	rtnl_unlock();
5023*4882a593Smuzhiyun 
5024*4882a593Smuzhiyun 	return 0;
5025*4882a593Smuzhiyun }
5026*4882a593Smuzhiyun 
bnx2x_resume(struct device * dev_d)5027*4882a593Smuzhiyun static int __maybe_unused bnx2x_resume(struct device *dev_d)
5028*4882a593Smuzhiyun {
5029*4882a593Smuzhiyun 	struct pci_dev *pdev = to_pci_dev(dev_d);
5030*4882a593Smuzhiyun 	struct net_device *dev = pci_get_drvdata(pdev);
5031*4882a593Smuzhiyun 	struct bnx2x *bp;
5032*4882a593Smuzhiyun 	int rc;
5033*4882a593Smuzhiyun 
5034*4882a593Smuzhiyun 	if (!dev) {
5035*4882a593Smuzhiyun 		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
5036*4882a593Smuzhiyun 		return -ENODEV;
5037*4882a593Smuzhiyun 	}
5038*4882a593Smuzhiyun 	bp = netdev_priv(dev);
5039*4882a593Smuzhiyun 
5040*4882a593Smuzhiyun 	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
5041*4882a593Smuzhiyun 		BNX2X_ERR("Handling parity error recovery. Try again later\n");
5042*4882a593Smuzhiyun 		return -EAGAIN;
5043*4882a593Smuzhiyun 	}
5044*4882a593Smuzhiyun 
5045*4882a593Smuzhiyun 	rtnl_lock();
5046*4882a593Smuzhiyun 
5047*4882a593Smuzhiyun 	if (!netif_running(dev)) {
5048*4882a593Smuzhiyun 		rtnl_unlock();
5049*4882a593Smuzhiyun 		return 0;
5050*4882a593Smuzhiyun 	}
5051*4882a593Smuzhiyun 
5052*4882a593Smuzhiyun 	netif_device_attach(dev);
5053*4882a593Smuzhiyun 
5054*4882a593Smuzhiyun 	rc = bnx2x_nic_load(bp, LOAD_OPEN);
5055*4882a593Smuzhiyun 
5056*4882a593Smuzhiyun 	rtnl_unlock();
5057*4882a593Smuzhiyun 
5058*4882a593Smuzhiyun 	return rc;
5059*4882a593Smuzhiyun }
5060*4882a593Smuzhiyun 
5061*4882a593Smuzhiyun SIMPLE_DEV_PM_OPS(bnx2x_pm_ops, bnx2x_suspend, bnx2x_resume);
5062*4882a593Smuzhiyun 
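/* Write the CDU validation values for connection 'cid' into the ustorm and
 * xstorm aggregation sections of the ETH context.
 */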
bnx2x_set_ctx_validation(struct bnx2x * bp,struct eth_context * cxt,u32 cid)5063*4882a593Smuzhiyun void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
5064*4882a593Smuzhiyun 			      u32 cid)
5065*4882a593Smuzhiyun {
5066*4882a593Smuzhiyun 	if (!cxt) {
5067*4882a593Smuzhiyun 		BNX2X_ERR("bad context pointer %p\n", cxt);
5068*4882a593Smuzhiyun 		return;
5069*4882a593Smuzhiyun 	}
5070*4882a593Smuzhiyun 
5071*4882a593Smuzhiyun 	/* ustorm cxt validation */
5072*4882a593Smuzhiyun 	cxt->ustorm_ag_context.cdu_usage =
5073*4882a593Smuzhiyun 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5074*4882a593Smuzhiyun 			CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
5075*4882a593Smuzhiyun 	/* xcontext validation */
5076*4882a593Smuzhiyun 	cxt->xstorm_ag_context.cdu_reserved =
5077*4882a593Smuzhiyun 		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
5078*4882a593Smuzhiyun 			CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
5079*4882a593Smuzhiyun }
5080*4882a593Smuzhiyun 
storm_memset_hc_timeout(struct bnx2x * bp,u8 port,u8 fw_sb_id,u8 sb_index,u8 ticks)5081*4882a593Smuzhiyun static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
5082*4882a593Smuzhiyun 				    u8 fw_sb_id, u8 sb_index,
5083*4882a593Smuzhiyun 				    u8 ticks)
5084*4882a593Smuzhiyun {
5085*4882a593Smuzhiyun 	u32 addr = BAR_CSTRORM_INTMEM +
5086*4882a593Smuzhiyun 		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);
5087*4882a593Smuzhiyun 	REG_WR8(bp, addr, ticks);
5088*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP,
5089*4882a593Smuzhiyun 	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
5090*4882a593Smuzhiyun 	   port, fw_sb_id, sb_index, ticks);
5091*4882a593Smuzhiyun }
5092*4882a593Smuzhiyun 
storm_memset_hc_disable(struct bnx2x * bp,u8 port,u16 fw_sb_id,u8 sb_index,u8 disable)5093*4882a593Smuzhiyun static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
5094*4882a593Smuzhiyun 				    u16 fw_sb_id, u8 sb_index,
5095*4882a593Smuzhiyun 				    u8 disable)
5096*4882a593Smuzhiyun {
5097*4882a593Smuzhiyun 	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
5098*4882a593Smuzhiyun 	u32 addr = BAR_CSTRORM_INTMEM +
5099*4882a593Smuzhiyun 		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
5100*4882a593Smuzhiyun 	u8 flags = REG_RD8(bp, addr);
5101*4882a593Smuzhiyun 	/* clear and set */
5102*4882a593Smuzhiyun 	flags &= ~HC_INDEX_DATA_HC_ENABLED;
5103*4882a593Smuzhiyun 	flags |= enable_flag;
5104*4882a593Smuzhiyun 	REG_WR8(bp, addr, flags);
5105*4882a593Smuzhiyun 	DP(NETIF_MSG_IFUP,
5106*4882a593Smuzhiyun 	   "port %x fw_sb_id %d sb_index %d disable %d\n",
5107*4882a593Smuzhiyun 	   port, fw_sb_id, sb_index, disable);
5108*4882a593Smuzhiyun }
5109*4882a593Smuzhiyun 
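/* Program interrupt coalescing for one status block index: convert the
 * requested microseconds into BNX2X_BTR-sized ticks, write the HC timeout and
 * update the enable/disable flag (a zero 'usec' value forces disable).
 */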
bnx2x_update_coalesce_sb_index(struct bnx2x * bp,u8 fw_sb_id,u8 sb_index,u8 disable,u16 usec)5110*4882a593Smuzhiyun void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
5111*4882a593Smuzhiyun 				    u8 sb_index, u8 disable, u16 usec)
5112*4882a593Smuzhiyun {
5113*4882a593Smuzhiyun 	int port = BP_PORT(bp);
5114*4882a593Smuzhiyun 	u8 ticks = usec / BNX2X_BTR;
5115*4882a593Smuzhiyun 
5116*4882a593Smuzhiyun 	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
5117*4882a593Smuzhiyun 
5118*4882a593Smuzhiyun 	disable = disable ? 1 : (usec ? 0 : 1);
5119*4882a593Smuzhiyun 	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
5120*4882a593Smuzhiyun }
5121*4882a593Smuzhiyun 
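/* Set 'flag' in bp->sp_rtnl_state (with the required memory barriers) and
 * schedule the slowpath rtnl task to handle it, e.g. as in bnx2x_tx_timeout()
 * above:
 *
 *	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
 */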
bnx2x_schedule_sp_rtnl(struct bnx2x * bp,enum sp_rtnl_flag flag,u32 verbose)5122*4882a593Smuzhiyun void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
5123*4882a593Smuzhiyun 			    u32 verbose)
5124*4882a593Smuzhiyun {
5125*4882a593Smuzhiyun 	smp_mb__before_atomic();
5126*4882a593Smuzhiyun 	set_bit(flag, &bp->sp_rtnl_state);
5127*4882a593Smuzhiyun 	smp_mb__after_atomic();
5128*4882a593Smuzhiyun 	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
5129*4882a593Smuzhiyun 	   flag);
5130*4882a593Smuzhiyun 	schedule_delayed_work(&bp->sp_rtnl_task, 0);
5131*4882a593Smuzhiyun }
5132