// SPDX-License-Identifier: GPL-2.0-only
/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */
#include <linux/bitops.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/if_ether.h>
#include <linux/ip.h>
#include <linux/prefetch.h>
#include <linux/module.h>

#include "bnad.h"
#include "bna.h"
#include "cna.h"

static DEFINE_MUTEX(bnad_fwimg_mutex);

/*
 * Module params
 */
static uint bnad_msix_disable;
module_param(bnad_msix_disable, uint, 0444);
MODULE_PARM_DESC(bnad_msix_disable, "Disable MSIX mode");

static uint bnad_ioc_auto_recover = 1;
module_param(bnad_ioc_auto_recover, uint, 0444);
MODULE_PARM_DESC(bnad_ioc_auto_recover, "Enable / Disable auto recovery");

static uint bna_debugfs_enable = 1;
module_param(bna_debugfs_enable, uint, 0644);
MODULE_PARM_DESC(bna_debugfs_enable, "Enables debugfs feature, default=1,"
		 " Range[false:0|true:1]");

/*
 * Global variables
 */
static u32 bnad_rxqs_per_cq = 2;
static atomic_t bna_id;
static const u8 bnad_bcast_addr[] __aligned(2) =
	{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };

/*
 * Local MACROS
 */
#define BNAD_GET_MBOX_IRQ(_bnad)				\
	(((_bnad)->cfg_flags & BNAD_CF_MSIX) ?			\
	 ((_bnad)->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector) : \
	 ((_bnad)->pcidev->irq))

#define BNAD_FILL_UNMAPQ_MEM_REQ(_res_info, _num, _size)	\
do {								\
	(_res_info)->res_type = BNA_RES_T_MEM;			\
	(_res_info)->res_u.mem_info.mem_type = BNA_MEM_T_KVA;	\
	(_res_info)->res_u.mem_info.num = (_num);		\
	(_res_info)->res_u.mem_info.len = (_size);		\
} while (0)

/*
 * Reinitialize completions in CQ, once Rx is taken down
 */
static void
bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bna_cq_entry *cmpl;
	int i;

	for (i = 0; i < ccb->q_depth; i++) {
		cmpl = &((struct bna_cq_entry *)ccb->sw_q)[i];
		cmpl->valid = 0;
	}
}

/* Tx Datapath functions */


/* Caller should ensure that the entry at unmap_q[index] is valid */
static u32
bnad_tx_buff_unmap(struct bnad *bnad,
			      struct bnad_tx_unmap *unmap_q,
			      u32 q_depth, u32 index)
{
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;
	int vector, nvecs;

	unmap = &unmap_q[index];
	nvecs = unmap->nvecs;

	skb = unmap->skb;
	unmap->skb = NULL;
	unmap->nvecs = 0;
	dma_unmap_single(&bnad->pcidev->dev,
		dma_unmap_addr(&unmap->vectors[0], dma_addr),
		skb_headlen(skb), DMA_TO_DEVICE);
	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, 0);
	nvecs--;

	vector = 0;
	while (nvecs) {
		vector++;
		if (vector == BFI_TX_MAX_VECTORS_PER_WI) {
			vector = 0;
			BNA_QE_INDX_INC(index, q_depth);
			unmap = &unmap_q[index];
		}

		dma_unmap_page(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vectors[vector], dma_addr),
			dma_unmap_len(&unmap->vectors[vector], dma_len),
			DMA_TO_DEVICE);
		dma_unmap_addr_set(&unmap->vectors[vector], dma_addr, 0);
		nvecs--;
	}

	BNA_QE_INDX_INC(index, q_depth);

	return index;
}

/*
 * Frees all pending Tx Bufs
 * At this point no activity is expected on the Q,
 * so DMA unmap & freeing is fine.
 */
static void
bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct sk_buff *skb;
	int i;

	for (i = 0; i < tcb->q_depth; i++) {
		skb = unmap_q[i].skb;
		if (!skb)
			continue;
		bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);

		dev_kfree_skb_any(skb);
	}
}

/*
 * bnad_txcmpl_process : Frees the Tx bufs on Tx completion
 * Can be called in a) Interrupt context
 *		    b) Sending context
 */
static u32
bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
{
	u32 sent_packets = 0, sent_bytes = 0;
	u32 wis, unmap_wis, hw_cons, cons, q_depth;
	struct bnad_tx_unmap *unmap_q = tcb->unmap_q;
	struct bnad_tx_unmap *unmap;
	struct sk_buff *skb;

	/* Just return if TX is stopped */
	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
		return 0;

	hw_cons = *(tcb->hw_consumer_index);
	rmb();
	cons = tcb->consumer_index;
	q_depth = tcb->q_depth;

	wis = BNA_Q_INDEX_CHANGE(cons, hw_cons, q_depth);
	BUG_ON(!(wis <= BNA_QE_IN_USE_CNT(tcb, tcb->q_depth)));

	while (wis) {
		unmap = &unmap_q[cons];

		skb = unmap->skb;

		sent_packets++;
		sent_bytes += skb->len;

		unmap_wis = BNA_TXQ_WI_NEEDED(unmap->nvecs);
		wis -= unmap_wis;

		cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
		dev_kfree_skb_any(skb);
	}

	/* Update consumer pointers. */
	tcb->consumer_index = hw_cons;

	tcb->txq->tx_packets += sent_packets;
	tcb->txq->tx_bytes += sent_bytes;

	return sent_packets;
}

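/*
 * Reap completed Tx descriptors and, if the queue was stopped and enough
 * entries are now free, wake the netdev queue. The BNAD_TXQ_FREE_SENT
 * bit serializes this reaping against the Tx cleanup path.
 */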
203*4882a593Smuzhiyun static u32
bnad_tx_complete(struct bnad * bnad,struct bna_tcb * tcb)204*4882a593Smuzhiyun bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
205*4882a593Smuzhiyun {
206*4882a593Smuzhiyun 	struct net_device *netdev = bnad->netdev;
207*4882a593Smuzhiyun 	u32 sent = 0;
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
210*4882a593Smuzhiyun 		return 0;
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	sent = bnad_txcmpl_process(bnad, tcb);
213*4882a593Smuzhiyun 	if (sent) {
214*4882a593Smuzhiyun 		if (netif_queue_stopped(netdev) &&
215*4882a593Smuzhiyun 		    netif_carrier_ok(netdev) &&
216*4882a593Smuzhiyun 		    BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
217*4882a593Smuzhiyun 				    BNAD_NETIF_WAKE_THRESHOLD) {
218*4882a593Smuzhiyun 			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
219*4882a593Smuzhiyun 				netif_wake_queue(netdev);
220*4882a593Smuzhiyun 				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
221*4882a593Smuzhiyun 			}
222*4882a593Smuzhiyun 		}
223*4882a593Smuzhiyun 	}
224*4882a593Smuzhiyun 
225*4882a593Smuzhiyun 	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
226*4882a593Smuzhiyun 		bna_ib_ack(tcb->i_dbell, sent);
227*4882a593Smuzhiyun 
228*4882a593Smuzhiyun 	smp_mb__before_atomic();
229*4882a593Smuzhiyun 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
230*4882a593Smuzhiyun 
231*4882a593Smuzhiyun 	return sent;
232*4882a593Smuzhiyun }
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun /* MSIX Tx Completion Handler */
235*4882a593Smuzhiyun static irqreturn_t
bnad_msix_tx(int irq,void * data)236*4882a593Smuzhiyun bnad_msix_tx(int irq, void *data)
237*4882a593Smuzhiyun {
238*4882a593Smuzhiyun 	struct bna_tcb *tcb = (struct bna_tcb *)data;
239*4882a593Smuzhiyun 	struct bnad *bnad = tcb->bnad;
240*4882a593Smuzhiyun 
241*4882a593Smuzhiyun 	bnad_tx_complete(bnad, tcb);
242*4882a593Smuzhiyun 
243*4882a593Smuzhiyun 	return IRQ_HANDLED;
244*4882a593Smuzhiyun }
245*4882a593Smuzhiyun 
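/* Reset the Rx unmap-queue allocator state before (re)initialization */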
static inline void
bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;

	unmap_q->reuse_pi = -1;
	unmap_q->alloc_order = -1;
	unmap_q->map_size = 0;
	unmap_q->type = BNAD_RXBUF_NONE;
}

/* Default is page-based allocation. Multi-buffer support - TBD */
static int
bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int order;

	bnad_rxq_alloc_uninit(bnad, rcb);

	order = get_order(rcb->rxq->buffer_size);

	unmap_q->type = BNAD_RXBUF_PAGE;

	if (bna_is_small_rxq(rcb->id)) {
		unmap_q->alloc_order = 0;
		unmap_q->map_size = rcb->rxq->buffer_size;
	} else {
		if (rcb->rxq->multi_buffer) {
			unmap_q->alloc_order = 0;
			unmap_q->map_size = rcb->rxq->buffer_size;
			unmap_q->type = BNAD_RXBUF_MULTI_BUFF;
		} else {
			unmap_q->alloc_order = order;
			unmap_q->map_size =
				(rcb->rxq->buffer_size > 2048) ?
				PAGE_SIZE << order : 2048;
		}
	}

	BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);

	return 0;
}

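/*
 * Undo the DMA mapping and release the backing buffer of a single Rx
 * unmap entry: the _page variant for page-based buffers, the _skb
 * variant for skb-based buffers.
 */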
static inline void
bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->page)
		return;

	dma_unmap_page(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vector, dma_addr),
			unmap->vector.len, DMA_FROM_DEVICE);
	put_page(unmap->page);
	unmap->page = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}

static inline void
bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
{
	if (!unmap->skb)
		return;

	dma_unmap_single(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vector, dma_addr),
			unmap->vector.len, DMA_FROM_DEVICE);
	dev_kfree_skb_any(unmap->skb);
	unmap->skb = NULL;
	dma_unmap_addr_set(&unmap->vector, dma_addr, 0);
	unmap->vector.len = 0;
}

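/* Free every posted Rx buffer on the queue and reset the allocator state */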
static void
bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	int i;

	for (i = 0; i < rcb->q_depth; i++) {
		struct bnad_rx_unmap *unmap = &unmap_q->unmap[i];

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
	bnad_rxq_alloc_uninit(bnad, rcb);
}

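/*
 * Post up to 'nalloc' page-based buffers to the RxQ. Each allocation of
 * (PAGE_SIZE << alloc_order) bytes is carved into map_size chunks;
 * reuse_pi remembers the entry whose page still has spare room so that
 * consecutive buffers share it via get_page(). For example, with a 4 KB
 * PAGE_SIZE, alloc_order 0 and map_size 2048, two buffers share a page.
 */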
static u32
bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap, *prev;
	struct bna_rxq_entry *rxent;
	struct page *page;
	u32 page_offset, alloc_size;
	dma_addr_t dma_addr;

	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloc_size = PAGE_SIZE << unmap_q->alloc_order;
	alloced = 0;

	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		if (unmap_q->reuse_pi < 0) {
			page = alloc_pages(GFP_ATOMIC | __GFP_COMP,
					unmap_q->alloc_order);
			page_offset = 0;
		} else {
			prev = &unmap_q->unmap[unmap_q->reuse_pi];
			page = prev->page;
			page_offset = prev->page_offset + unmap_q->map_size;
			get_page(page);
		}

		if (unlikely(!page)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
					unmap_q->map_size, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			put_page(page);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->page = page;
		unmap->page_offset = page_offset;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = unmap_q->map_size;
		page_offset += unmap_q->map_size;

		if (page_offset < alloc_size)
			unmap_q->reuse_pi = prod;
		else
			unmap_q->reuse_pi = -1;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}

static u32
bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
{
	u32 alloced, prod, q_depth, buff_sz;
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	struct bnad_rx_unmap *unmap;
	struct bna_rxq_entry *rxent;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	buff_sz = rcb->rxq->buffer_size;
	prod = rcb->producer_index;
	q_depth = rcb->q_depth;

	alloced = 0;
	while (nalloc--) {
		unmap = &unmap_q->unmap[prod];

		skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);

		if (unlikely(!skb)) {
			BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
			rcb->rxq->rxbuf_alloc_failed++;
			goto finishing;
		}

		dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
					  buff_sz, DMA_FROM_DEVICE);
		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
			dev_kfree_skb_any(skb);
			BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
			rcb->rxq->rxbuf_map_failed++;
			goto finishing;
		}

		unmap->skb = skb;
		dma_unmap_addr_set(&unmap->vector, dma_addr, dma_addr);
		unmap->vector.len = buff_sz;

		rxent = &((struct bna_rxq_entry *)rcb->sw_q)[prod];
		BNA_SET_DMA_ADDR(dma_addr, &rxent->host_addr);
		BNA_QE_INDX_INC(prod, q_depth);
		alloced++;
	}

finishing:
	if (likely(alloced)) {
		rcb->producer_index = prod;
		smp_mb();
		if (likely(test_bit(BNAD_RXQ_POST_OK, &rcb->flags)))
			bna_rxq_prod_indx_doorbell(rcb);
	}

	return alloced;
}

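/*
 * Replenish an RxQ, but only once at least
 * (1 << BNAD_RXQ_REFILL_THRESHOLD_SHIFT) entries are free, so buffers
 * are posted in batches rather than one per received frame.
 */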
static inline void
bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
{
	struct bnad_rx_unmap_q *unmap_q = rcb->unmap_q;
	u32 to_alloc;

	to_alloc = BNA_QE_FREE_CNT(rcb, rcb->q_depth);
	if (!(to_alloc >> BNAD_RXQ_REFILL_THRESHOLD_SHIFT))
		return;

	if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
		bnad_rxq_refill_skb(bnad, rcb, to_alloc);
	else
		bnad_rxq_refill_page(bnad, rcb, to_alloc);
}

#define flags_cksum_prot_mask (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
					BNA_CQ_EF_IPV6 | \
					BNA_CQ_EF_TCP | BNA_CQ_EF_UDP | \
					BNA_CQ_EF_L4_CKSUM_OK)

#define flags_tcp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_tcp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_TCP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp4 (BNA_CQ_EF_IPV4 | BNA_CQ_EF_L3_CKSUM_OK | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)
#define flags_udp6 (BNA_CQ_EF_IPV6 | \
				BNA_CQ_EF_UDP | BNA_CQ_EF_L4_CKSUM_OK)

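/* Drop a received frame: free the 'nvecs' buffers starting at sop_ci */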
static void
bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
		    u32 sop_ci, u32 nvecs)
{
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap;
	u32 ci, vec;

	unmap_q = rcb->unmap_q;
	for (vec = 0, ci = sop_ci; vec < nvecs; vec++) {
		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_rxq_cleanup_skb(bnad, unmap);
		else
			bnad_rxq_cleanup_page(bnad, unmap);
	}
}

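/*
 * Attach the 'nvecs' page buffers of one (possibly multi-buffer) frame
 * to 'skb' as page fragments, unmapping each buffer and accounting the
 * frame length into skb->len/data_len/truesize.
 */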
static void
bnad_cq_setup_skb_frags(struct bna_ccb *ccb, struct sk_buff *skb, u32 nvecs)
{
	struct bna_rcb *rcb;
	struct bnad *bnad;
	struct bnad_rx_unmap_q *unmap_q;
	struct bna_cq_entry *cq, *cmpl;
	u32 ci, pi, totlen = 0;

	cq = ccb->sw_q;
	pi = ccb->producer_index;
	cmpl = &cq[pi];

	rcb = bna_is_small_rxq(cmpl->rxq_id) ? ccb->rcb[1] : ccb->rcb[0];
	unmap_q = rcb->unmap_q;
	bnad = rcb->bnad;
	ci = rcb->consumer_index;

	/* prefetch header */
	prefetch(page_address(unmap_q->unmap[ci].page) +
		 unmap_q->unmap[ci].page_offset);

	while (nvecs--) {
		struct bnad_rx_unmap *unmap;
		u32 len;

		unmap = &unmap_q->unmap[ci];
		BNA_QE_INDX_INC(ci, rcb->q_depth);

		dma_unmap_page(&bnad->pcidev->dev,
			       dma_unmap_addr(&unmap->vector, dma_addr),
			       unmap->vector.len, DMA_FROM_DEVICE);

		len = ntohs(cmpl->length);
		skb->truesize += unmap->vector.len;
		totlen += len;

		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
				   unmap->page, unmap->page_offset, len);

		unmap->page = NULL;
		unmap->vector.len = 0;

		BNA_QE_INDX_INC(pi, ccb->q_depth);
		cmpl = &cq[pi];
	}

	skb->len += totlen;
	skb->data_len += totlen;
}

static inline void
bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
		  struct bnad_rx_unmap *unmap, u32 len)
{
	prefetch(skb->data);

	dma_unmap_single(&bnad->pcidev->dev,
			dma_unmap_addr(&unmap->vector, dma_addr),
			unmap->vector.len, DMA_FROM_DEVICE);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, bnad->netdev);

	unmap->skb = NULL;
	unmap->vector.len = 0;
}

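/*
 * NAPI poll core. Walks the CQ until 'budget' frames are processed or no
 * valid completion is left: gathers all completions of a frame (multi-
 * buffer frames span several), drops frames with MAC/FCS/length errors,
 * otherwise builds the skb, sets checksum/VLAN state, hands it to the
 * stack, and finally acks the IB and replenishes both RxQs.
 */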
static u32
bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
{
	struct bna_cq_entry *cq, *cmpl, *next_cmpl;
	struct bna_rcb *rcb = NULL;
	struct bnad_rx_unmap_q *unmap_q;
	struct bnad_rx_unmap *unmap = NULL;
	struct sk_buff *skb = NULL;
	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
	struct bnad_rx_ctrl *rx_ctrl = ccb->ctrl;
	u32 packets = 0, len = 0, totlen = 0;
	u32 pi, vec, sop_ci = 0, nvecs = 0;
	u32 flags, masked_flags;

	prefetch(bnad->netdev);

	cq = ccb->sw_q;

	while (packets < budget) {
		cmpl = &cq[ccb->producer_index];
		if (!cmpl->valid)
			break;
		/* The 'valid' field is set by the adapter, only after writing
		 * the other fields of completion entry. Hence, do not load
		 * other fields of completion entry *before* the 'valid' is
		 * loaded. Adding the rmb() here prevents the compiler and/or
		 * CPU from reordering the reads which would potentially result
		 * in reading stale values in completion entry.
		 */
		rmb();

		BNA_UPDATE_PKT_CNT(pkt_rt, ntohs(cmpl->length));

		if (bna_is_small_rxq(cmpl->rxq_id))
			rcb = ccb->rcb[1];
		else
			rcb = ccb->rcb[0];

		unmap_q = rcb->unmap_q;

		/* start of packet ci */
		sop_ci = rcb->consumer_index;

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type)) {
			unmap = &unmap_q->unmap[sop_ci];
			skb = unmap->skb;
		} else {
			skb = napi_get_frags(&rx_ctrl->napi);
			if (unlikely(!skb))
				break;
		}
		prefetch(skb);

		flags = ntohl(cmpl->flags);
		len = ntohs(cmpl->length);
		totlen = len;
		nvecs = 1;

		/* Check all the completions for this frame.
		 * busy-wait doesn't help much, break here.
		 */
		if (BNAD_RXBUF_IS_MULTI_BUFF(unmap_q->type) &&
		    (flags & BNA_CQ_EF_EOP) == 0) {
			pi = ccb->producer_index;
			do {
				BNA_QE_INDX_INC(pi, ccb->q_depth);
				next_cmpl = &cq[pi];

				if (!next_cmpl->valid)
					break;
				/* The 'valid' field is set by the adapter, only
				 * after writing the other fields of completion
				 * entry. Hence, do not load other fields of
				 * completion entry *before* the 'valid' is
				 * loaded. Adding the rmb() here prevents the
				 * compiler and/or CPU from reordering the reads
				 * which would potentially result in reading
				 * stale values in completion entry.
				 */
				rmb();

				len = ntohs(next_cmpl->length);
				flags = ntohl(next_cmpl->flags);

				nvecs++;
				totlen += len;
			} while ((flags & BNA_CQ_EF_EOP) == 0);

			if (!next_cmpl->valid)
				break;
		}
		packets++;

		/* TODO: BNA_CQ_EF_LOCAL ? */
		if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
						BNA_CQ_EF_FCS_ERROR |
						BNA_CQ_EF_TOO_LONG))) {
			bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
			rcb->rxq->rx_packets_with_error++;

			goto next;
		}

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			bnad_cq_setup_skb(bnad, skb, unmap, len);
		else
			bnad_cq_setup_skb_frags(ccb, skb, nvecs);

		rcb->rxq->rx_packets++;
		rcb->rxq->rx_bytes += totlen;
		ccb->bytes_per_intr += totlen;

		masked_flags = flags & flags_cksum_prot_mask;

		if (likely
		    ((bnad->netdev->features & NETIF_F_RXCSUM) &&
		     ((masked_flags == flags_tcp4) ||
		      (masked_flags == flags_udp4) ||
		      (masked_flags == flags_tcp6) ||
		      (masked_flags == flags_udp6))))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		if ((flags & BNA_CQ_EF_VLAN) &&
		    (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));

		if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
			netif_receive_skb(skb);
		else
			napi_gro_frags(&rx_ctrl->napi);

next:
		BNA_QE_INDX_ADD(rcb->consumer_index, nvecs, rcb->q_depth);
		for (vec = 0; vec < nvecs; vec++) {
			cmpl = &cq[ccb->producer_index];
			cmpl->valid = 0;
			BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
		}
	}

	napi_gro_flush(&rx_ctrl->napi, false);
	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
		bna_ib_ack_disable_irq(ccb->i_dbell, packets);

	bnad_rxq_post(bnad, ccb->rcb[0]);
	if (ccb->rcb[1])
		bnad_rxq_post(bnad, ccb->rcb[1]);

	return packets;
}

static void
bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
	struct napi_struct *napi = &rx_ctrl->napi;

	if (likely(napi_schedule_prep(napi))) {
		__napi_schedule(napi);
		rx_ctrl->rx_schedule++;
	}
}

/* MSIX Rx Path Handler */
static irqreturn_t
bnad_msix_rx(int irq, void *data)
{
	struct bna_ccb *ccb = (struct bna_ccb *)data;

	if (ccb) {
		((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
		bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
	}

	return IRQ_HANDLED;
}

/* Interrupt handlers */

/* Mbox Interrupt Handlers */
static irqreturn_t
bnad_msix_mbox_handler(int irq, void *data)
{
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_HANDLED;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	return IRQ_HANDLED;
}

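/*
 * Legacy (INTx) interrupt handler: services mailbox events first and,
 * if the status indicates a data interrupt, reaps Tx completions and
 * schedules NAPI polling on every active Rx path.
 */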
static irqreturn_t
bnad_isr(int irq, void *data)
{
	int i, j;
	u32 intr_status;
	unsigned long flags;
	struct bnad *bnad = (struct bnad *)data;
	struct bnad_rx_info *rx_info;
	struct bnad_rx_ctrl *rx_ctrl;
	struct bna_tcb *tcb = NULL;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	bna_intr_status_get(&bnad->bna, intr_status);

	if (unlikely(!intr_status)) {
		spin_unlock_irqrestore(&bnad->bna_lock, flags);
		return IRQ_NONE;
	}

	if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
		bna_mbox_handler(&bnad->bna, intr_status);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	if (!BNA_IS_INTX_DATA_INTR(intr_status))
		return IRQ_HANDLED;

	/* Process data interrupts */
	/* Tx processing */
	for (i = 0; i < bnad->num_tx; i++) {
		for (j = 0; j < bnad->num_txq_per_tx; j++) {
			tcb = bnad->tx_info[i].tcb[j];
			if (tcb && test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
				bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
		}
	}
	/* Rx processing */
	for (i = 0; i < bnad->num_rx; i++) {
		rx_info = &bnad->rx_info[i];
		if (!rx_info->rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
			rx_ctrl = &rx_info->rx_ctrl[j];
			if (rx_ctrl->ccb)
				bnad_netif_rx_schedule_poll(bnad,
							    rx_ctrl->ccb);
		}
	}
	return IRQ_HANDLED;
}

/*
 * Called in interrupt / callback context
 * with bna_lock held, so cfg_flags access is OK
 */
static void
bnad_enable_mbox_irq(struct bnad *bnad)
{
	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
}

/*
 * Called with bnad->bna_lock held b'cos of
 * bnad->cfg_flags access.
 */
static void
bnad_disable_mbox_irq(struct bnad *bnad)
{
	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);

	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
}

static void
bnad_set_netdev_perm_addr(struct bnad *bnad)
{
	struct net_device *netdev = bnad->netdev;

	ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
	if (is_zero_ether_addr(netdev->dev_addr))
		ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
}

/* Control Path Handlers */

/* Callbacks */
void
bnad_cb_mbox_intr_enable(struct bnad *bnad)
{
	bnad_enable_mbox_irq(bnad);
}

void
bnad_cb_mbox_intr_disable(struct bnad *bnad)
{
	bnad_disable_mbox_irq(bnad);
}

void
bnad_cb_ioceth_ready(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_failed(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
	complete(&bnad->bnad_completions.ioc_comp);
}

void
bnad_cb_ioceth_disabled(struct bnad *bnad)
{
	bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
	complete(&bnad->bnad_completions.ioc_comp);
}

static void
bnad_cb_enet_disabled(void *arg)
{
	struct bnad *bnad = (struct bnad *)arg;

	netif_carrier_off(bnad->netdev);
	complete(&bnad->bnad_completions.enet_comp);
}

void
bnad_cb_ethport_link_status(struct bnad *bnad,
			enum bna_link_status link_status)
{
	bool link_up = false;

	link_up = (link_status == BNA_LINK_UP) || (link_status == BNA_CEE_UP);

	if (link_status == BNA_CEE_UP) {
		if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	} else {
		if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
			BNAD_UPDATE_CTR(bnad, cee_toggle);
		clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
	}

	if (link_up) {
		if (!netif_carrier_ok(bnad->netdev)) {
			uint tx_id, tcb_id;
			netdev_info(bnad->netdev, "link up\n");
			netif_carrier_on(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
			for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
				for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
				      tcb_id++) {
					struct bna_tcb *tcb =
					bnad->tx_info[tx_id].tcb[tcb_id];
					u32 txq_id;
					if (!tcb)
						continue;

					txq_id = tcb->id;

					if (test_bit(BNAD_TXQ_TX_STARTED,
						     &tcb->flags)) {
						/*
						 * Force an immediate
						 * Transmit Schedule */
						netif_wake_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_wakeup);
					} else {
						netif_stop_subqueue(
								bnad->netdev,
								txq_id);
						BNAD_UPDATE_CTR(bnad,
							netif_queue_stop);
					}
				}
			}
		}
	} else {
		if (netif_carrier_ok(bnad->netdev)) {
			netdev_info(bnad->netdev, "link down\n");
			netif_carrier_off(bnad->netdev);
			BNAD_UPDATE_CTR(bnad, link_toggle);
		}
	}
}

static void
bnad_cb_tx_disabled(void *arg, struct bna_tx *tx)
{
	struct bnad *bnad = (struct bnad *)arg;

	complete(&bnad->bnad_completions.tx_comp);
}

static void
bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tcb->priv = tcb;
	tx_info->tcb[tcb->id] = tcb;
}

static void
bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
{
	struct bnad_tx_info *tx_info =
			(struct bnad_tx_info *)tcb->txq->tx->priv;

	tx_info->tcb[tcb->id] = NULL;
	tcb->priv = NULL;
}

static void
bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = ccb;
	ccb->ctrl = &rx_info->rx_ctrl[ccb->id];
}

static void
bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
{
	struct bnad_rx_info *rx_info =
			(struct bnad_rx_info *)ccb->cq->rx->priv;

	rx_info->rx_ctrl[ccb->id].ccb = NULL;
}

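/*
 * Stop all Tx subqueues of this Tx object; bnad_cb_tx_resume() restarts
 * them once the queues are brought back up.
 */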
1037*4882a593Smuzhiyun static void
bnad_cb_tx_stall(struct bnad * bnad,struct bna_tx * tx)1038*4882a593Smuzhiyun bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1039*4882a593Smuzhiyun {
1040*4882a593Smuzhiyun 	struct bnad_tx_info *tx_info =
1041*4882a593Smuzhiyun 			(struct bnad_tx_info *)tx->priv;
1042*4882a593Smuzhiyun 	struct bna_tcb *tcb;
1043*4882a593Smuzhiyun 	u32 txq_id;
1044*4882a593Smuzhiyun 	int i;
1045*4882a593Smuzhiyun 
1046*4882a593Smuzhiyun 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1047*4882a593Smuzhiyun 		tcb = tx_info->tcb[i];
1048*4882a593Smuzhiyun 		if (!tcb)
1049*4882a593Smuzhiyun 			continue;
1050*4882a593Smuzhiyun 		txq_id = tcb->id;
1051*4882a593Smuzhiyun 		clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1052*4882a593Smuzhiyun 		netif_stop_subqueue(bnad->netdev, txq_id);
1053*4882a593Smuzhiyun 	}
1054*4882a593Smuzhiyun }
1055*4882a593Smuzhiyun 
1056*4882a593Smuzhiyun static void
bnad_cb_tx_resume(struct bnad * bnad,struct bna_tx * tx)1057*4882a593Smuzhiyun bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1058*4882a593Smuzhiyun {
1059*4882a593Smuzhiyun 	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1060*4882a593Smuzhiyun 	struct bna_tcb *tcb;
1061*4882a593Smuzhiyun 	u32 txq_id;
1062*4882a593Smuzhiyun 	int i;
1063*4882a593Smuzhiyun 
1064*4882a593Smuzhiyun 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1065*4882a593Smuzhiyun 		tcb = tx_info->tcb[i];
1066*4882a593Smuzhiyun 		if (!tcb)
1067*4882a593Smuzhiyun 			continue;
1068*4882a593Smuzhiyun 		txq_id = tcb->id;
1069*4882a593Smuzhiyun 
1070*4882a593Smuzhiyun 		BUG_ON(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags));
1071*4882a593Smuzhiyun 		set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
1072*4882a593Smuzhiyun 		BUG_ON(*(tcb->hw_consumer_index) != 0);
1073*4882a593Smuzhiyun 
1074*4882a593Smuzhiyun 		if (netif_carrier_ok(bnad->netdev)) {
1075*4882a593Smuzhiyun 			netif_wake_subqueue(bnad->netdev, txq_id);
1076*4882a593Smuzhiyun 			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1077*4882a593Smuzhiyun 		}
1078*4882a593Smuzhiyun 	}
1079*4882a593Smuzhiyun 
1080*4882a593Smuzhiyun 	/*
1081*4882a593Smuzhiyun 	 * Workaround for first ioceth enable failure & we
1082*4882a593Smuzhiyun 	 * get a 0 MAC address. We try to get the MAC address
1083*4882a593Smuzhiyun 	 * again here.
1084*4882a593Smuzhiyun 	 */
1085*4882a593Smuzhiyun 	if (is_zero_ether_addr(bnad->perm_addr)) {
1086*4882a593Smuzhiyun 		bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1087*4882a593Smuzhiyun 		bnad_set_netdev_perm_addr(bnad);
1088*4882a593Smuzhiyun 	}
1089*4882a593Smuzhiyun }
1090*4882a593Smuzhiyun 
1091*4882a593Smuzhiyun /*
1092*4882a593Smuzhiyun  * Free all TxQs buffers and then notify TX_E_CLEANUP_DONE to Tx fsm.
1093*4882a593Smuzhiyun  */
1094*4882a593Smuzhiyun static void
bnad_tx_cleanup(struct delayed_work * work)1095*4882a593Smuzhiyun bnad_tx_cleanup(struct delayed_work *work)
1096*4882a593Smuzhiyun {
1097*4882a593Smuzhiyun 	struct bnad_tx_info *tx_info =
1098*4882a593Smuzhiyun 		container_of(work, struct bnad_tx_info, tx_cleanup_work);
1099*4882a593Smuzhiyun 	struct bnad *bnad = NULL;
1100*4882a593Smuzhiyun 	struct bna_tcb *tcb;
1101*4882a593Smuzhiyun 	unsigned long flags;
1102*4882a593Smuzhiyun 	u32 i, pending = 0;
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1105*4882a593Smuzhiyun 		tcb = tx_info->tcb[i];
1106*4882a593Smuzhiyun 		if (!tcb)
1107*4882a593Smuzhiyun 			continue;
1108*4882a593Smuzhiyun 
1109*4882a593Smuzhiyun 		bnad = tcb->bnad;
1110*4882a593Smuzhiyun 
1111*4882a593Smuzhiyun 		if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
1112*4882a593Smuzhiyun 			pending++;
1113*4882a593Smuzhiyun 			continue;
1114*4882a593Smuzhiyun 		}
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun 		bnad_txq_cleanup(bnad, tcb);
1117*4882a593Smuzhiyun 
1118*4882a593Smuzhiyun 		smp_mb__before_atomic();
1119*4882a593Smuzhiyun 		clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
1120*4882a593Smuzhiyun 	}
1121*4882a593Smuzhiyun 
1122*4882a593Smuzhiyun 	if (pending) {
1123*4882a593Smuzhiyun 		queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1124*4882a593Smuzhiyun 			msecs_to_jiffies(1));
1125*4882a593Smuzhiyun 		return;
1126*4882a593Smuzhiyun 	}
1127*4882a593Smuzhiyun 
1128*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1129*4882a593Smuzhiyun 	bna_tx_cleanup_complete(tx_info->tx);
1130*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1131*4882a593Smuzhiyun }
1132*4882a593Smuzhiyun 
1133*4882a593Smuzhiyun static void
bnad_cb_tx_cleanup(struct bnad * bnad,struct bna_tx * tx)1134*4882a593Smuzhiyun bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1135*4882a593Smuzhiyun {
1136*4882a593Smuzhiyun 	struct bnad_tx_info *tx_info = (struct bnad_tx_info *)tx->priv;
1137*4882a593Smuzhiyun 	struct bna_tcb *tcb;
1138*4882a593Smuzhiyun 	int i;
1139*4882a593Smuzhiyun 
1140*4882a593Smuzhiyun 	for (i = 0; i < BNAD_MAX_TXQ_PER_TX; i++) {
1141*4882a593Smuzhiyun 		tcb = tx_info->tcb[i];
1142*4882a593Smuzhiyun 		if (!tcb)
1143*4882a593Smuzhiyun 			continue;
1144*4882a593Smuzhiyun 	}
1145*4882a593Smuzhiyun 
1146*4882a593Smuzhiyun 	queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1147*4882a593Smuzhiyun }
1148*4882a593Smuzhiyun 
1149*4882a593Smuzhiyun static void
bnad_cb_rx_stall(struct bnad * bnad,struct bna_rx * rx)1150*4882a593Smuzhiyun bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1151*4882a593Smuzhiyun {
1152*4882a593Smuzhiyun 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1153*4882a593Smuzhiyun 	struct bna_ccb *ccb;
1154*4882a593Smuzhiyun 	struct bnad_rx_ctrl *rx_ctrl;
1155*4882a593Smuzhiyun 	int i;
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1158*4882a593Smuzhiyun 		rx_ctrl = &rx_info->rx_ctrl[i];
1159*4882a593Smuzhiyun 		ccb = rx_ctrl->ccb;
1160*4882a593Smuzhiyun 		if (!ccb)
1161*4882a593Smuzhiyun 			continue;
1162*4882a593Smuzhiyun 
1163*4882a593Smuzhiyun 		clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[0]->flags);
1164*4882a593Smuzhiyun 
1165*4882a593Smuzhiyun 		if (ccb->rcb[1])
1166*4882a593Smuzhiyun 			clear_bit(BNAD_RXQ_POST_OK, &ccb->rcb[1]->flags);
1167*4882a593Smuzhiyun 	}
1168*4882a593Smuzhiyun }
1169*4882a593Smuzhiyun 
1170*4882a593Smuzhiyun /*
1171*4882a593Smuzhiyun  * Free all RxQs buffers and then notify RX_E_CLEANUP_DONE to Rx fsm.
1172*4882a593Smuzhiyun  */
1173*4882a593Smuzhiyun static void
bnad_rx_cleanup(void * work)1174*4882a593Smuzhiyun bnad_rx_cleanup(void *work)
1175*4882a593Smuzhiyun {
1176*4882a593Smuzhiyun 	struct bnad_rx_info *rx_info =
1177*4882a593Smuzhiyun 		container_of(work, struct bnad_rx_info, rx_cleanup_work);
1178*4882a593Smuzhiyun 	struct bnad_rx_ctrl *rx_ctrl;
1179*4882a593Smuzhiyun 	struct bnad *bnad = NULL;
1180*4882a593Smuzhiyun 	unsigned long flags;
1181*4882a593Smuzhiyun 	u32 i;
1182*4882a593Smuzhiyun 
1183*4882a593Smuzhiyun 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1184*4882a593Smuzhiyun 		rx_ctrl = &rx_info->rx_ctrl[i];
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun 		if (!rx_ctrl->ccb)
1187*4882a593Smuzhiyun 			continue;
1188*4882a593Smuzhiyun 
1189*4882a593Smuzhiyun 		bnad = rx_ctrl->ccb->bnad;
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun 		/*
1192*4882a593Smuzhiyun 		 * Wait till the poll handler has exited
1193*4882a593Smuzhiyun 		 * and nothing can be scheduled anymore
1194*4882a593Smuzhiyun 		 */
1195*4882a593Smuzhiyun 		napi_disable(&rx_ctrl->napi);
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 		bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1198*4882a593Smuzhiyun 		bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1199*4882a593Smuzhiyun 		if (rx_ctrl->ccb->rcb[1])
1200*4882a593Smuzhiyun 			bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1201*4882a593Smuzhiyun 	}
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1204*4882a593Smuzhiyun 	bna_rx_cleanup_complete(rx_info->rx);
1205*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1206*4882a593Smuzhiyun }
1207*4882a593Smuzhiyun 
1208*4882a593Smuzhiyun static void
1209*4882a593Smuzhiyun bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1210*4882a593Smuzhiyun {
1211*4882a593Smuzhiyun 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1212*4882a593Smuzhiyun 	struct bna_ccb *ccb;
1213*4882a593Smuzhiyun 	struct bnad_rx_ctrl *rx_ctrl;
1214*4882a593Smuzhiyun 	int i;
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1217*4882a593Smuzhiyun 		rx_ctrl = &rx_info->rx_ctrl[i];
1218*4882a593Smuzhiyun 		ccb = rx_ctrl->ccb;
1219*4882a593Smuzhiyun 		if (!ccb)
1220*4882a593Smuzhiyun 			continue;
1221*4882a593Smuzhiyun 
1222*4882a593Smuzhiyun 		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
1223*4882a593Smuzhiyun 
1224*4882a593Smuzhiyun 		if (ccb->rcb[1])
1225*4882a593Smuzhiyun 			clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
1226*4882a593Smuzhiyun 	}
1227*4882a593Smuzhiyun 
1228*4882a593Smuzhiyun 	queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1229*4882a593Smuzhiyun }
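/*
 * Editor's note: bnad_cb_rx_cleanup() runs in a context that must not
 * sleep, but the real teardown needs napi_disable(), which can.  Hence
 * the split above: the callback only clears the STARTED bits and queues
 * rx_cleanup_work, and bnad_rx_cleanup() does the sleeping work.  The
 * general shape of this defer-to-workqueue idiom, as a hedged sketch
 * (the example_* names are illustrative, not part of the driver):
 */
#if 0
static void example_cb_cleanup(struct example *ex)	/* atomic context */
{
	clear_bit(EXAMPLE_STARTED, &ex->flags);		/* stop new postings */
	queue_work(ex->work_q, &ex->cleanup_work);	/* defer the rest */
}

static void example_cleanup_work(struct work_struct *work)	/* may sleep */
{
	struct example *ex = container_of(work, struct example, cleanup_work);

	napi_disable(&ex->napi);	/* waits for the poller to exit */
	example_free_buffers(ex);
}
#endif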
1230*4882a593Smuzhiyun 
1231*4882a593Smuzhiyun static void
1232*4882a593Smuzhiyun bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1233*4882a593Smuzhiyun {
1234*4882a593Smuzhiyun 	struct bnad_rx_info *rx_info = (struct bnad_rx_info *)rx->priv;
1235*4882a593Smuzhiyun 	struct bna_ccb *ccb;
1236*4882a593Smuzhiyun 	struct bna_rcb *rcb;
1237*4882a593Smuzhiyun 	struct bnad_rx_ctrl *rx_ctrl;
1238*4882a593Smuzhiyun 	int i, j;
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 	for (i = 0; i < BNAD_MAX_RXP_PER_RX; i++) {
1241*4882a593Smuzhiyun 		rx_ctrl = &rx_info->rx_ctrl[i];
1242*4882a593Smuzhiyun 		ccb = rx_ctrl->ccb;
1243*4882a593Smuzhiyun 		if (!ccb)
1244*4882a593Smuzhiyun 			continue;
1245*4882a593Smuzhiyun 
1246*4882a593Smuzhiyun 		napi_enable(&rx_ctrl->napi);
1247*4882a593Smuzhiyun 
1248*4882a593Smuzhiyun 		for (j = 0; j < BNAD_MAX_RXQ_PER_RXP; j++) {
1249*4882a593Smuzhiyun 			rcb = ccb->rcb[j];
1250*4882a593Smuzhiyun 			if (!rcb)
1251*4882a593Smuzhiyun 				continue;
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 			bnad_rxq_alloc_init(bnad, rcb);
1254*4882a593Smuzhiyun 			set_bit(BNAD_RXQ_STARTED, &rcb->flags);
1255*4882a593Smuzhiyun 			set_bit(BNAD_RXQ_POST_OK, &rcb->flags);
1256*4882a593Smuzhiyun 			bnad_rxq_post(bnad, rcb);
1257*4882a593Smuzhiyun 		}
1258*4882a593Smuzhiyun 	}
1259*4882a593Smuzhiyun }
1260*4882a593Smuzhiyun 
1261*4882a593Smuzhiyun static void
1262*4882a593Smuzhiyun bnad_cb_rx_disabled(void *arg, struct bna_rx *rx)
1263*4882a593Smuzhiyun {
1264*4882a593Smuzhiyun 	struct bnad *bnad = (struct bnad *)arg;
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	complete(&bnad->bnad_completions.rx_comp);
1267*4882a593Smuzhiyun }
1268*4882a593Smuzhiyun 
1269*4882a593Smuzhiyun static void
1270*4882a593Smuzhiyun bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1271*4882a593Smuzhiyun {
1272*4882a593Smuzhiyun 	bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1273*4882a593Smuzhiyun 	complete(&bnad->bnad_completions.mcast_comp);
1274*4882a593Smuzhiyun }
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun void
1277*4882a593Smuzhiyun bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1278*4882a593Smuzhiyun 		       struct bna_stats *stats)
1279*4882a593Smuzhiyun {
1280*4882a593Smuzhiyun 	if (status == BNA_CB_SUCCESS)
1281*4882a593Smuzhiyun 		BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1282*4882a593Smuzhiyun 
1283*4882a593Smuzhiyun 	if (!netif_running(bnad->netdev) ||
1284*4882a593Smuzhiyun 		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1285*4882a593Smuzhiyun 		return;
1286*4882a593Smuzhiyun 
1287*4882a593Smuzhiyun 	mod_timer(&bnad->stats_timer,
1288*4882a593Smuzhiyun 		  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1289*4882a593Smuzhiyun }
1290*4882a593Smuzhiyun 
1291*4882a593Smuzhiyun static void
1292*4882a593Smuzhiyun bnad_cb_enet_mtu_set(struct bnad *bnad)
1293*4882a593Smuzhiyun {
1294*4882a593Smuzhiyun 	bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1295*4882a593Smuzhiyun 	complete(&bnad->bnad_completions.mtu_comp);
1296*4882a593Smuzhiyun }
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun void
1299*4882a593Smuzhiyun bnad_cb_completion(void *arg, enum bfa_status status)
1300*4882a593Smuzhiyun {
1301*4882a593Smuzhiyun 	struct bnad_iocmd_comp *iocmd_comp =
1302*4882a593Smuzhiyun 			(struct bnad_iocmd_comp *)arg;
1303*4882a593Smuzhiyun 
1304*4882a593Smuzhiyun 	iocmd_comp->comp_status = (u32) status;
1305*4882a593Smuzhiyun 	complete(&iocmd_comp->comp);
1306*4882a593Smuzhiyun }
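/*
 * Editor's note: bnad_cb_completion() is the callback half of the
 * completion handshake this file uses for firmware commands.  A hedged
 * sketch of the caller half (the command issue itself is elided here):
 */
#if 0
struct bnad_iocmd_comp iocmd_comp;

init_completion(&iocmd_comp.comp);
/* ... issue the command, passing bnad_cb_completion as the
 * completion callback and &iocmd_comp as its cookie ... */
wait_for_completion(&iocmd_comp.comp);
if (iocmd_comp.comp_status)		/* filled in by the callback */
	return -EIO;
#endif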
1307*4882a593Smuzhiyun 
1308*4882a593Smuzhiyun /* Resource allocation, free functions */
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun static void
1311*4882a593Smuzhiyun bnad_mem_free(struct bnad *bnad,
1312*4882a593Smuzhiyun 	      struct bna_mem_info *mem_info)
1313*4882a593Smuzhiyun {
1314*4882a593Smuzhiyun 	int i;
1315*4882a593Smuzhiyun 	dma_addr_t dma_pa;
1316*4882a593Smuzhiyun 
1317*4882a593Smuzhiyun 	if (mem_info->mdl == NULL)
1318*4882a593Smuzhiyun 		return;
1319*4882a593Smuzhiyun 
1320*4882a593Smuzhiyun 	for (i = 0; i < mem_info->num; i++) {
1321*4882a593Smuzhiyun 		if (mem_info->mdl[i].kva != NULL) {
1322*4882a593Smuzhiyun 			if (mem_info->mem_type == BNA_MEM_T_DMA) {
1323*4882a593Smuzhiyun 				BNA_GET_DMA_ADDR(&(mem_info->mdl[i].dma),
1324*4882a593Smuzhiyun 						dma_pa);
1325*4882a593Smuzhiyun 				dma_free_coherent(&bnad->pcidev->dev,
1326*4882a593Smuzhiyun 						  mem_info->mdl[i].len,
1327*4882a593Smuzhiyun 						  mem_info->mdl[i].kva, dma_pa);
1328*4882a593Smuzhiyun 			} else
1329*4882a593Smuzhiyun 				kfree(mem_info->mdl[i].kva);
1330*4882a593Smuzhiyun 		}
1331*4882a593Smuzhiyun 	}
1332*4882a593Smuzhiyun 	kfree(mem_info->mdl);
1333*4882a593Smuzhiyun 	mem_info->mdl = NULL;
1334*4882a593Smuzhiyun }
1335*4882a593Smuzhiyun 
1336*4882a593Smuzhiyun static int
1337*4882a593Smuzhiyun bnad_mem_alloc(struct bnad *bnad,
1338*4882a593Smuzhiyun 	       struct bna_mem_info *mem_info)
1339*4882a593Smuzhiyun {
1340*4882a593Smuzhiyun 	int i;
1341*4882a593Smuzhiyun 	dma_addr_t dma_pa;
1342*4882a593Smuzhiyun 
1343*4882a593Smuzhiyun 	if ((mem_info->num == 0) || (mem_info->len == 0)) {
1344*4882a593Smuzhiyun 		mem_info->mdl = NULL;
1345*4882a593Smuzhiyun 		return 0;
1346*4882a593Smuzhiyun 	}
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 	mem_info->mdl = kcalloc(mem_info->num, sizeof(struct bna_mem_descr),
1349*4882a593Smuzhiyun 				GFP_KERNEL);
1350*4882a593Smuzhiyun 	if (mem_info->mdl == NULL)
1351*4882a593Smuzhiyun 		return -ENOMEM;
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 	if (mem_info->mem_type == BNA_MEM_T_DMA) {
1354*4882a593Smuzhiyun 		for (i = 0; i < mem_info->num; i++) {
1355*4882a593Smuzhiyun 			mem_info->mdl[i].len = mem_info->len;
1356*4882a593Smuzhiyun 			mem_info->mdl[i].kva =
1357*4882a593Smuzhiyun 				dma_alloc_coherent(&bnad->pcidev->dev,
1358*4882a593Smuzhiyun 						   mem_info->len, &dma_pa,
1359*4882a593Smuzhiyun 						   GFP_KERNEL);
1360*4882a593Smuzhiyun 			if (mem_info->mdl[i].kva == NULL)
1361*4882a593Smuzhiyun 				goto err_return;
1362*4882a593Smuzhiyun 
1363*4882a593Smuzhiyun 			BNA_SET_DMA_ADDR(dma_pa,
1364*4882a593Smuzhiyun 					 &(mem_info->mdl[i].dma));
1365*4882a593Smuzhiyun 		}
1366*4882a593Smuzhiyun 	} else {
1367*4882a593Smuzhiyun 		for (i = 0; i < mem_info->num; i++) {
1368*4882a593Smuzhiyun 			mem_info->mdl[i].len = mem_info->len;
1369*4882a593Smuzhiyun 			mem_info->mdl[i].kva = kzalloc(mem_info->len,
1370*4882a593Smuzhiyun 							GFP_KERNEL);
1371*4882a593Smuzhiyun 			if (mem_info->mdl[i].kva == NULL)
1372*4882a593Smuzhiyun 				goto err_return;
1373*4882a593Smuzhiyun 		}
1374*4882a593Smuzhiyun 	}
1375*4882a593Smuzhiyun 
1376*4882a593Smuzhiyun 	return 0;
1377*4882a593Smuzhiyun 
1378*4882a593Smuzhiyun err_return:
1379*4882a593Smuzhiyun 	bnad_mem_free(bnad, mem_info);
1380*4882a593Smuzhiyun 	return -ENOMEM;
1381*4882a593Smuzhiyun }
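/*
 * Editor's note: a hedged usage sketch for the allocator above.  A
 * bna_mem_info describes "num" blocks of "len" bytes each, either DMA
 * coherent or plain kernel memory; bnad_mem_alloc() fills in the mdl[]
 * descriptor array, and a failure at any point is unwound by calling
 * bnad_mem_free() on the same descriptor (the values are illustrative):
 */
#if 0
struct bna_mem_info mi = {
	.mem_type = BNA_MEM_T_DMA,	/* or BNA_MEM_T_KVA for kzalloc'd memory */
	.num	  = 4,			/* four blocks ... */
	.len	  = 4096,		/* ... of 4 KiB each */
};

if (bnad_mem_alloc(bnad, &mi))
	return -ENOMEM;
/* ... use mi.mdl[0..3].kva (and .dma for the DMA case) ... */
bnad_mem_free(bnad, &mi);
#endif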
1382*4882a593Smuzhiyun 
1383*4882a593Smuzhiyun /* Free IRQ for Mailbox */
1384*4882a593Smuzhiyun static void
1385*4882a593Smuzhiyun bnad_mbox_irq_free(struct bnad *bnad)
1386*4882a593Smuzhiyun {
1387*4882a593Smuzhiyun 	int irq;
1388*4882a593Smuzhiyun 	unsigned long flags;
1389*4882a593Smuzhiyun 
1390*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1391*4882a593Smuzhiyun 	bnad_disable_mbox_irq(bnad);
1392*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 	irq = BNAD_GET_MBOX_IRQ(bnad);
1395*4882a593Smuzhiyun 	free_irq(irq, bnad);
1396*4882a593Smuzhiyun }
1397*4882a593Smuzhiyun 
1398*4882a593Smuzhiyun /*
1399*4882a593Smuzhiyun  * Allocates the IRQ for the mailbox but keeps it disabled.
1400*4882a593Smuzhiyun  * It will be enabled once we get the mbox enable callback
1401*4882a593Smuzhiyun  * from bna.
1402*4882a593Smuzhiyun  */
1403*4882a593Smuzhiyun static int
1404*4882a593Smuzhiyun bnad_mbox_irq_alloc(struct bnad *bnad)
1405*4882a593Smuzhiyun {
1406*4882a593Smuzhiyun 	int		err = 0;
1407*4882a593Smuzhiyun 	unsigned long	irq_flags, flags;
1408*4882a593Smuzhiyun 	u32	irq;
1409*4882a593Smuzhiyun 	irq_handler_t	irq_handler;
1410*4882a593Smuzhiyun 
1411*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1412*4882a593Smuzhiyun 	if (bnad->cfg_flags & BNAD_CF_MSIX) {
1413*4882a593Smuzhiyun 		irq_handler = (irq_handler_t)bnad_msix_mbox_handler;
1414*4882a593Smuzhiyun 		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1415*4882a593Smuzhiyun 		irq_flags = 0;
1416*4882a593Smuzhiyun 	} else {
1417*4882a593Smuzhiyun 		irq_handler = (irq_handler_t)bnad_isr;
1418*4882a593Smuzhiyun 		irq = bnad->pcidev->irq;
1419*4882a593Smuzhiyun 		irq_flags = IRQF_SHARED;
1420*4882a593Smuzhiyun 	}
1421*4882a593Smuzhiyun 
1422*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1423*4882a593Smuzhiyun 	sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 	/*
1426*4882a593Smuzhiyun 	 * Set the Mbox IRQ disable flag, so that the IRQ handler
1427*4882a593Smuzhiyun 	 * called from request_irq() for SHARED IRQs does not execute
1428*4882a593Smuzhiyun 	 */
1429*4882a593Smuzhiyun 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1430*4882a593Smuzhiyun 
1431*4882a593Smuzhiyun 	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1432*4882a593Smuzhiyun 
1433*4882a593Smuzhiyun 	err = request_irq(irq, irq_handler, irq_flags,
1434*4882a593Smuzhiyun 			  bnad->mbox_irq_name, bnad);
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	return err;
1437*4882a593Smuzhiyun }
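/*
 * Editor's note: the BNAD_RF_MBOX_IRQ_DISABLED bit set above matters
 * because request_irq() on an IRQF_SHARED line may invoke the handler
 * immediately, before this driver is ready (another device on the same
 * line can already be interrupting).  bnad_isr's body is not shown in
 * this section; a hedged sketch of the guard such a handler applies:
 */
#if 0
static irqreturn_t example_isr(int irq, void *data)
{
	struct bnad *bnad = data;

	if (test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
		return IRQ_NONE;	/* not enabled yet; not our interrupt */
	/* ... normal mailbox/interrupt processing ... */
	return IRQ_HANDLED;
}
#endif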
1438*4882a593Smuzhiyun 
1439*4882a593Smuzhiyun static void
1440*4882a593Smuzhiyun bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1441*4882a593Smuzhiyun {
1442*4882a593Smuzhiyun 	kfree(intr_info->idl);
1443*4882a593Smuzhiyun 	intr_info->idl = NULL;
1444*4882a593Smuzhiyun }
1445*4882a593Smuzhiyun 
1446*4882a593Smuzhiyun /* Allocates Interrupt Descriptor List for MSIX/INT-X vectors */
1447*4882a593Smuzhiyun static int
1448*4882a593Smuzhiyun bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1449*4882a593Smuzhiyun 		    u32 txrx_id, struct bna_intr_info *intr_info)
1450*4882a593Smuzhiyun {
1451*4882a593Smuzhiyun 	int i, vector_start = 0;
1452*4882a593Smuzhiyun 	u32 cfg_flags;
1453*4882a593Smuzhiyun 	unsigned long flags;
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1456*4882a593Smuzhiyun 	cfg_flags = bnad->cfg_flags;
1457*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	if (cfg_flags & BNAD_CF_MSIX) {
1460*4882a593Smuzhiyun 		intr_info->intr_type = BNA_INTR_T_MSIX;
1461*4882a593Smuzhiyun 		intr_info->idl = kcalloc(intr_info->num,
1462*4882a593Smuzhiyun 					sizeof(struct bna_intr_descr),
1463*4882a593Smuzhiyun 					GFP_KERNEL);
1464*4882a593Smuzhiyun 		if (!intr_info->idl)
1465*4882a593Smuzhiyun 			return -ENOMEM;
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun 		switch (src) {
1468*4882a593Smuzhiyun 		case BNAD_INTR_TX:
1469*4882a593Smuzhiyun 			vector_start = BNAD_MAILBOX_MSIX_VECTORS + txrx_id;
1470*4882a593Smuzhiyun 			break;
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 		case BNAD_INTR_RX:
1473*4882a593Smuzhiyun 			vector_start = BNAD_MAILBOX_MSIX_VECTORS +
1474*4882a593Smuzhiyun 					(bnad->num_tx * bnad->num_txq_per_tx) +
1475*4882a593Smuzhiyun 					txrx_id;
1476*4882a593Smuzhiyun 			break;
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 		default:
1479*4882a593Smuzhiyun 			BUG();
1480*4882a593Smuzhiyun 		}
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun 		for (i = 0; i < intr_info->num; i++)
1483*4882a593Smuzhiyun 			intr_info->idl[i].vector = vector_start + i;
1484*4882a593Smuzhiyun 	} else {
1485*4882a593Smuzhiyun 		intr_info->intr_type = BNA_INTR_T_INTX;
1486*4882a593Smuzhiyun 		intr_info->num = 1;
1487*4882a593Smuzhiyun 		intr_info->idl = kcalloc(intr_info->num,
1488*4882a593Smuzhiyun 					sizeof(struct bna_intr_descr),
1489*4882a593Smuzhiyun 					GFP_KERNEL);
1490*4882a593Smuzhiyun 		if (!intr_info->idl)
1491*4882a593Smuzhiyun 			return -ENOMEM;
1492*4882a593Smuzhiyun 
1493*4882a593Smuzhiyun 		switch (src) {
1494*4882a593Smuzhiyun 		case BNAD_INTR_TX:
1495*4882a593Smuzhiyun 			intr_info->idl[0].vector = BNAD_INTX_TX_IB_BITMASK;
1496*4882a593Smuzhiyun 			break;
1497*4882a593Smuzhiyun 
1498*4882a593Smuzhiyun 		case BNAD_INTR_RX:
1499*4882a593Smuzhiyun 			intr_info->idl[0].vector = BNAD_INTX_RX_IB_BITMASK;
1500*4882a593Smuzhiyun 			break;
1501*4882a593Smuzhiyun 		}
1502*4882a593Smuzhiyun 	}
1503*4882a593Smuzhiyun 	return 0;
1504*4882a593Smuzhiyun }
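/*
 * Editor's note: the MSI-X table is laid out as
 * [mailbox vectors][all TxQ vectors][all CQ vectors], which is what the
 * two vector_start computations above encode.  Worked example, assuming
 * BNAD_MAILBOX_MSIX_VECTORS == 1 (the value is defined elsewhere), one
 * Tx object with two TxQs (num_tx = 1, num_txq_per_tx = 2) and two Rx
 * paths:
 *
 *	vector 0	mailbox
 *	vectors 1-2	TxQs	(vector_start = 1 + txrx_id)
 *	vectors 3-4	CQs	(vector_start = 1 + 1 * 2 + txrx_id)
 */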
1505*4882a593Smuzhiyun 
1506*4882a593Smuzhiyun /* NOTE: Should be called for MSIX only
1507*4882a593Smuzhiyun  * Unregisters Tx MSIX vector(s) from the kernel
1508*4882a593Smuzhiyun  */
1509*4882a593Smuzhiyun static void
1510*4882a593Smuzhiyun bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1511*4882a593Smuzhiyun 			int num_txqs)
1512*4882a593Smuzhiyun {
1513*4882a593Smuzhiyun 	int i;
1514*4882a593Smuzhiyun 	int vector_num;
1515*4882a593Smuzhiyun 
1516*4882a593Smuzhiyun 	for (i = 0; i < num_txqs; i++) {
1517*4882a593Smuzhiyun 		if (tx_info->tcb[i] == NULL)
1518*4882a593Smuzhiyun 			continue;
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 		vector_num = tx_info->tcb[i]->intr_vector;
1521*4882a593Smuzhiyun 		free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1522*4882a593Smuzhiyun 	}
1523*4882a593Smuzhiyun }
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun /* NOTE: Should be called for MSIX only
1526*4882a593Smuzhiyun  * Registers Tx MSIX vector(s) and ISR(s), cookie with the kernel
1527*4882a593Smuzhiyun  */
1528*4882a593Smuzhiyun static int
1529*4882a593Smuzhiyun bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1530*4882a593Smuzhiyun 			u32 tx_id, int num_txqs)
1531*4882a593Smuzhiyun {
1532*4882a593Smuzhiyun 	int i;
1533*4882a593Smuzhiyun 	int err;
1534*4882a593Smuzhiyun 	int vector_num;
1535*4882a593Smuzhiyun 
1536*4882a593Smuzhiyun 	for (i = 0; i < num_txqs; i++) {
1537*4882a593Smuzhiyun 		vector_num = tx_info->tcb[i]->intr_vector;
1538*4882a593Smuzhiyun 		sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1539*4882a593Smuzhiyun 				tx_id + tx_info->tcb[i]->id);
1540*4882a593Smuzhiyun 		err = request_irq(bnad->msix_table[vector_num].vector,
1541*4882a593Smuzhiyun 				  (irq_handler_t)bnad_msix_tx, 0,
1542*4882a593Smuzhiyun 				  tx_info->tcb[i]->name,
1543*4882a593Smuzhiyun 				  tx_info->tcb[i]);
1544*4882a593Smuzhiyun 		if (err)
1545*4882a593Smuzhiyun 			goto err_return;
1546*4882a593Smuzhiyun 	}
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun 	return 0;
1549*4882a593Smuzhiyun 
1550*4882a593Smuzhiyun err_return:
1551*4882a593Smuzhiyun 	if (i > 0)
1552*4882a593Smuzhiyun 		bnad_tx_msix_unregister(bnad, tx_info, i);
1553*4882a593Smuzhiyun 	return -1;
1554*4882a593Smuzhiyun }
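/*
 * Editor's note: the error path above relies on the unregister helper
 * taking a vector *count*, not a last index: when request_irq() fails
 * at index i, exactly i vectors (indices 0..i-1) were registered and
 * must be freed.  The generic shape of this unwind idiom, as a hedged
 * sketch:
 */
#if 0
for (i = 0; i < n; i++)
	if (request_irq(vec[i], handler, 0, name[i], cookie[i]))
		goto unwind;
return 0;

unwind:
while (i--)				/* frees indices i-1 down to 0 */
	free_irq(vec[i], cookie[i]);
return -1;
#endif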
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun /* NOTE: Should be called for MSIX only
1557*4882a593Smuzhiyun  * Unregisters Rx MSIX vector(s) from the kernel
1558*4882a593Smuzhiyun  */
1559*4882a593Smuzhiyun static void
1560*4882a593Smuzhiyun bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1561*4882a593Smuzhiyun 			int num_rxps)
1562*4882a593Smuzhiyun {
1563*4882a593Smuzhiyun 	int i;
1564*4882a593Smuzhiyun 	int vector_num;
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	for (i = 0; i < num_rxps; i++) {
1567*4882a593Smuzhiyun 		if (rx_info->rx_ctrl[i].ccb == NULL)
1568*4882a593Smuzhiyun 			continue;
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1571*4882a593Smuzhiyun 		free_irq(bnad->msix_table[vector_num].vector,
1572*4882a593Smuzhiyun 			 rx_info->rx_ctrl[i].ccb);
1573*4882a593Smuzhiyun 	}
1574*4882a593Smuzhiyun }
1575*4882a593Smuzhiyun 
1576*4882a593Smuzhiyun /* NOTE: Should be called for MSIX only
1577*4882a593Smuzhiyun  * Registers Rx MSIX vector(s) and ISR(s), cookie with the kernel
1578*4882a593Smuzhiyun  */
1579*4882a593Smuzhiyun static int
1580*4882a593Smuzhiyun bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1581*4882a593Smuzhiyun 			u32 rx_id, int num_rxps)
1582*4882a593Smuzhiyun {
1583*4882a593Smuzhiyun 	int i;
1584*4882a593Smuzhiyun 	int err;
1585*4882a593Smuzhiyun 	int vector_num;
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun 	for (i = 0; i < num_rxps; i++) {
1588*4882a593Smuzhiyun 		vector_num = rx_info->rx_ctrl[i].ccb->intr_vector;
1589*4882a593Smuzhiyun 		sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d",
1590*4882a593Smuzhiyun 			bnad->netdev->name,
1591*4882a593Smuzhiyun 			rx_id + rx_info->rx_ctrl[i].ccb->id);
1592*4882a593Smuzhiyun 		err = request_irq(bnad->msix_table[vector_num].vector,
1593*4882a593Smuzhiyun 				  (irq_handler_t)bnad_msix_rx, 0,
1594*4882a593Smuzhiyun 				  rx_info->rx_ctrl[i].ccb->name,
1595*4882a593Smuzhiyun 				  rx_info->rx_ctrl[i].ccb);
1596*4882a593Smuzhiyun 		if (err)
1597*4882a593Smuzhiyun 			goto err_return;
1598*4882a593Smuzhiyun 	}
1599*4882a593Smuzhiyun 
1600*4882a593Smuzhiyun 	return 0;
1601*4882a593Smuzhiyun 
1602*4882a593Smuzhiyun err_return:
1603*4882a593Smuzhiyun 	if (i > 0)
1604*4882a593Smuzhiyun 		bnad_rx_msix_unregister(bnad, rx_info, i);
1605*4882a593Smuzhiyun 	return -1;
1606*4882a593Smuzhiyun }
1607*4882a593Smuzhiyun 
1608*4882a593Smuzhiyun /* Free Tx object Resources */
1609*4882a593Smuzhiyun static void
1610*4882a593Smuzhiyun bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1611*4882a593Smuzhiyun {
1612*4882a593Smuzhiyun 	int i;
1613*4882a593Smuzhiyun 
1614*4882a593Smuzhiyun 	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1615*4882a593Smuzhiyun 		if (res_info[i].res_type == BNA_RES_T_MEM)
1616*4882a593Smuzhiyun 			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1617*4882a593Smuzhiyun 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1618*4882a593Smuzhiyun 			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1619*4882a593Smuzhiyun 	}
1620*4882a593Smuzhiyun }
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun /* Allocates memory and interrupt resources for Tx object */
1623*4882a593Smuzhiyun static int
1624*4882a593Smuzhiyun bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1625*4882a593Smuzhiyun 		  u32 tx_id)
1626*4882a593Smuzhiyun {
1627*4882a593Smuzhiyun 	int i, err = 0;
1628*4882a593Smuzhiyun 
1629*4882a593Smuzhiyun 	for (i = 0; i < BNA_TX_RES_T_MAX; i++) {
1630*4882a593Smuzhiyun 		if (res_info[i].res_type == BNA_RES_T_MEM)
1631*4882a593Smuzhiyun 			err = bnad_mem_alloc(bnad,
1632*4882a593Smuzhiyun 					&res_info[i].res_u.mem_info);
1633*4882a593Smuzhiyun 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1634*4882a593Smuzhiyun 			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1635*4882a593Smuzhiyun 					&res_info[i].res_u.intr_info);
1636*4882a593Smuzhiyun 		if (err)
1637*4882a593Smuzhiyun 			goto err_return;
1638*4882a593Smuzhiyun 	}
1639*4882a593Smuzhiyun 	return 0;
1640*4882a593Smuzhiyun 
1641*4882a593Smuzhiyun err_return:
1642*4882a593Smuzhiyun 	bnad_tx_res_free(bnad, res_info);
1643*4882a593Smuzhiyun 	return err;
1644*4882a593Smuzhiyun }
1645*4882a593Smuzhiyun 
1646*4882a593Smuzhiyun /* Free Rx object Resources */
1647*4882a593Smuzhiyun static void
1648*4882a593Smuzhiyun bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1649*4882a593Smuzhiyun {
1650*4882a593Smuzhiyun 	int i;
1651*4882a593Smuzhiyun 
1652*4882a593Smuzhiyun 	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1653*4882a593Smuzhiyun 		if (res_info[i].res_type == BNA_RES_T_MEM)
1654*4882a593Smuzhiyun 			bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1655*4882a593Smuzhiyun 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1656*4882a593Smuzhiyun 			bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1657*4882a593Smuzhiyun 	}
1658*4882a593Smuzhiyun }
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun /* Allocates memory and interrupt resources for Rx object */
1661*4882a593Smuzhiyun static int
1662*4882a593Smuzhiyun bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1663*4882a593Smuzhiyun 		  uint rx_id)
1664*4882a593Smuzhiyun {
1665*4882a593Smuzhiyun 	int i, err = 0;
1666*4882a593Smuzhiyun 
1667*4882a593Smuzhiyun 	/* All memory needs to be allocated before setup_ccbs */
1668*4882a593Smuzhiyun 	for (i = 0; i < BNA_RX_RES_T_MAX; i++) {
1669*4882a593Smuzhiyun 		if (res_info[i].res_type == BNA_RES_T_MEM)
1670*4882a593Smuzhiyun 			err = bnad_mem_alloc(bnad,
1671*4882a593Smuzhiyun 					&res_info[i].res_u.mem_info);
1672*4882a593Smuzhiyun 		else if (res_info[i].res_type == BNA_RES_T_INTR)
1673*4882a593Smuzhiyun 			err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1674*4882a593Smuzhiyun 					&res_info[i].res_u.intr_info);
1675*4882a593Smuzhiyun 		if (err)
1676*4882a593Smuzhiyun 			goto err_return;
1677*4882a593Smuzhiyun 	}
1678*4882a593Smuzhiyun 	return 0;
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun err_return:
1681*4882a593Smuzhiyun 	bnad_rx_res_free(bnad, res_info);
1682*4882a593Smuzhiyun 	return err;
1683*4882a593Smuzhiyun }
1684*4882a593Smuzhiyun 
1685*4882a593Smuzhiyun /* Timer callbacks */
1686*4882a593Smuzhiyun /* a) IOC timer */
1687*4882a593Smuzhiyun static void
1688*4882a593Smuzhiyun bnad_ioc_timeout(struct timer_list *t)
1689*4882a593Smuzhiyun {
1690*4882a593Smuzhiyun 	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
1691*4882a593Smuzhiyun 	unsigned long flags;
1692*4882a593Smuzhiyun 
1693*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1694*4882a593Smuzhiyun 	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1695*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1696*4882a593Smuzhiyun }
1697*4882a593Smuzhiyun 
1698*4882a593Smuzhiyun static void
1699*4882a593Smuzhiyun bnad_ioc_hb_check(struct timer_list *t)
1700*4882a593Smuzhiyun {
1701*4882a593Smuzhiyun 	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
1702*4882a593Smuzhiyun 	unsigned long flags;
1703*4882a593Smuzhiyun 
1704*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1705*4882a593Smuzhiyun 	bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1706*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1707*4882a593Smuzhiyun }
1708*4882a593Smuzhiyun 
1709*4882a593Smuzhiyun static void
1710*4882a593Smuzhiyun bnad_iocpf_timeout(struct timer_list *t)
1711*4882a593Smuzhiyun {
1712*4882a593Smuzhiyun 	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
1713*4882a593Smuzhiyun 	unsigned long flags;
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1716*4882a593Smuzhiyun 	bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1717*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1718*4882a593Smuzhiyun }
1719*4882a593Smuzhiyun 
1720*4882a593Smuzhiyun static void
1721*4882a593Smuzhiyun bnad_iocpf_sem_timeout(struct timer_list *t)
1722*4882a593Smuzhiyun {
1723*4882a593Smuzhiyun 	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
1724*4882a593Smuzhiyun 	unsigned long flags;
1725*4882a593Smuzhiyun 
1726*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1727*4882a593Smuzhiyun 	bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1728*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1729*4882a593Smuzhiyun }
1730*4882a593Smuzhiyun 
1731*4882a593Smuzhiyun /*
1732*4882a593Smuzhiyun  * All timer routines use bnad->bna_lock to protect against
1733*4882a593Smuzhiyun  * the following race, which may occur in case of no locking:
1734*4882a593Smuzhiyun  *	Time	CPU m	CPU n
1735*4882a593Smuzhiyun  *	0       1 = test_bit
1736*4882a593Smuzhiyun  *	1			clear_bit
1737*4882a593Smuzhiyun  *	2			del_timer_sync
1738*4882a593Smuzhiyun  *	3	mod_timer
1739*4882a593Smuzhiyun  */
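/*
 * Editor's note: a minimal sketch of how the lock closes the window in
 * the table above.  The RUNNING bit is tested and cleared atomically
 * under bna_lock, so a concurrent timer callback either sees the bit
 * still set and re-arms before del_timer_sync() runs, or sees it
 * cleared and backs off; either way the timer is gone afterwards
 * (compare bnad_stats_timer_stop() below):
 */
#if 0
int to_del;
unsigned long flags;

spin_lock_irqsave(&bnad->bna_lock, flags);
to_del = test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags);
spin_unlock_irqrestore(&bnad->bna_lock, flags);
if (to_del)
	del_timer_sync(&bnad->stats_timer);	/* never called under bna_lock */
#endif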
1740*4882a593Smuzhiyun 
1741*4882a593Smuzhiyun /* b) Dynamic Interrupt Moderation Timer */
1742*4882a593Smuzhiyun static void
1743*4882a593Smuzhiyun bnad_dim_timeout(struct timer_list *t)
1744*4882a593Smuzhiyun {
1745*4882a593Smuzhiyun 	struct bnad *bnad = from_timer(bnad, t, dim_timer);
1746*4882a593Smuzhiyun 	struct bnad_rx_info *rx_info;
1747*4882a593Smuzhiyun 	struct bnad_rx_ctrl *rx_ctrl;
1748*4882a593Smuzhiyun 	int i, j;
1749*4882a593Smuzhiyun 	unsigned long flags;
1750*4882a593Smuzhiyun 
1751*4882a593Smuzhiyun 	if (!netif_carrier_ok(bnad->netdev))
1752*4882a593Smuzhiyun 		return;
1753*4882a593Smuzhiyun 
1754*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1755*4882a593Smuzhiyun 	for (i = 0; i < bnad->num_rx; i++) {
1756*4882a593Smuzhiyun 		rx_info = &bnad->rx_info[i];
1757*4882a593Smuzhiyun 		if (!rx_info->rx)
1758*4882a593Smuzhiyun 			continue;
1759*4882a593Smuzhiyun 		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1760*4882a593Smuzhiyun 			rx_ctrl = &rx_info->rx_ctrl[j];
1761*4882a593Smuzhiyun 			if (!rx_ctrl->ccb)
1762*4882a593Smuzhiyun 				continue;
1763*4882a593Smuzhiyun 			bna_rx_dim_update(rx_ctrl->ccb);
1764*4882a593Smuzhiyun 		}
1765*4882a593Smuzhiyun 	}
1766*4882a593Smuzhiyun 
1767*4882a593Smuzhiyun 	/* Check for BNAD_RF_DIM_TIMER_RUNNING; this does not eliminate the race */
1768*4882a593Smuzhiyun 	if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1769*4882a593Smuzhiyun 		mod_timer(&bnad->dim_timer,
1770*4882a593Smuzhiyun 			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1771*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1772*4882a593Smuzhiyun }
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun /* c)  Statistics Timer */
1775*4882a593Smuzhiyun static void
1776*4882a593Smuzhiyun bnad_stats_timeout(struct timer_list *t)
1777*4882a593Smuzhiyun {
1778*4882a593Smuzhiyun 	struct bnad *bnad = from_timer(bnad, t, stats_timer);
1779*4882a593Smuzhiyun 	unsigned long flags;
1780*4882a593Smuzhiyun 
1781*4882a593Smuzhiyun 	if (!netif_running(bnad->netdev) ||
1782*4882a593Smuzhiyun 		!test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1783*4882a593Smuzhiyun 		return;
1784*4882a593Smuzhiyun 
1785*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1786*4882a593Smuzhiyun 	bna_hw_stats_get(&bnad->bna);
1787*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1788*4882a593Smuzhiyun }
1789*4882a593Smuzhiyun 
1790*4882a593Smuzhiyun /*
1791*4882a593Smuzhiyun  * Set up timer for DIM
1792*4882a593Smuzhiyun  * Called with bnad->bna_lock held
1793*4882a593Smuzhiyun  */
1794*4882a593Smuzhiyun void
1795*4882a593Smuzhiyun bnad_dim_timer_start(struct bnad *bnad)
1796*4882a593Smuzhiyun {
1797*4882a593Smuzhiyun 	if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1798*4882a593Smuzhiyun 	    !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1799*4882a593Smuzhiyun 		timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
1800*4882a593Smuzhiyun 		set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1801*4882a593Smuzhiyun 		mod_timer(&bnad->dim_timer,
1802*4882a593Smuzhiyun 			  jiffies + msecs_to_jiffies(BNAD_DIM_TIMER_FREQ));
1803*4882a593Smuzhiyun 	}
1804*4882a593Smuzhiyun }
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun /*
1807*4882a593Smuzhiyun  * Set up timer for statistics
1808*4882a593Smuzhiyun  * Called with mutex_lock(&bnad->conf_mutex) held
1809*4882a593Smuzhiyun  */
1810*4882a593Smuzhiyun static void
1811*4882a593Smuzhiyun bnad_stats_timer_start(struct bnad *bnad)
1812*4882a593Smuzhiyun {
1813*4882a593Smuzhiyun 	unsigned long flags;
1814*4882a593Smuzhiyun 
1815*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1816*4882a593Smuzhiyun 	if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1817*4882a593Smuzhiyun 		timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
1818*4882a593Smuzhiyun 		mod_timer(&bnad->stats_timer,
1819*4882a593Smuzhiyun 			  jiffies + msecs_to_jiffies(BNAD_STATS_TIMER_FREQ));
1820*4882a593Smuzhiyun 	}
1821*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1822*4882a593Smuzhiyun }
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun /*
1825*4882a593Smuzhiyun  * Stops the stats timer
1826*4882a593Smuzhiyun  * Called with mutex_lock(&bnad->conf_mutex) held
1827*4882a593Smuzhiyun  */
1828*4882a593Smuzhiyun static void
1829*4882a593Smuzhiyun bnad_stats_timer_stop(struct bnad *bnad)
1830*4882a593Smuzhiyun {
1831*4882a593Smuzhiyun 	int to_del = 0;
1832*4882a593Smuzhiyun 	unsigned long flags;
1833*4882a593Smuzhiyun 
1834*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1835*4882a593Smuzhiyun 	if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1836*4882a593Smuzhiyun 		to_del = 1;
1837*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1838*4882a593Smuzhiyun 	if (to_del)
1839*4882a593Smuzhiyun 		del_timer_sync(&bnad->stats_timer);
1840*4882a593Smuzhiyun }
1841*4882a593Smuzhiyun 
1842*4882a593Smuzhiyun /* Utilities */
1843*4882a593Smuzhiyun 
1844*4882a593Smuzhiyun static void
1845*4882a593Smuzhiyun bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
1846*4882a593Smuzhiyun {
1847*4882a593Smuzhiyun 	int i = 1; /* Index 0 has broadcast address */
1848*4882a593Smuzhiyun 	struct netdev_hw_addr *mc_addr;
1849*4882a593Smuzhiyun 
1850*4882a593Smuzhiyun 	netdev_for_each_mc_addr(mc_addr, netdev) {
1851*4882a593Smuzhiyun 		ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
1852*4882a593Smuzhiyun 		i++;
1853*4882a593Smuzhiyun 	}
1854*4882a593Smuzhiyun }
1855*4882a593Smuzhiyun 
1856*4882a593Smuzhiyun static int
1857*4882a593Smuzhiyun bnad_napi_poll_rx(struct napi_struct *napi, int budget)
1858*4882a593Smuzhiyun {
1859*4882a593Smuzhiyun 	struct bnad_rx_ctrl *rx_ctrl =
1860*4882a593Smuzhiyun 		container_of(napi, struct bnad_rx_ctrl, napi);
1861*4882a593Smuzhiyun 	struct bnad *bnad = rx_ctrl->bnad;
1862*4882a593Smuzhiyun 	int rcvd = 0;
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun 	rx_ctrl->rx_poll_ctr++;
1865*4882a593Smuzhiyun 
1866*4882a593Smuzhiyun 	if (!netif_carrier_ok(bnad->netdev))
1867*4882a593Smuzhiyun 		goto poll_exit;
1868*4882a593Smuzhiyun 
1869*4882a593Smuzhiyun 	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1870*4882a593Smuzhiyun 	if (rcvd >= budget)
1871*4882a593Smuzhiyun 		return rcvd;
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun poll_exit:
1874*4882a593Smuzhiyun 	napi_complete_done(napi, rcvd);
1875*4882a593Smuzhiyun 
1876*4882a593Smuzhiyun 	rx_ctrl->rx_complete++;
1877*4882a593Smuzhiyun 
1878*4882a593Smuzhiyun 	if (rx_ctrl->ccb)
1879*4882a593Smuzhiyun 		bnad_enable_rx_irq_unsafe(rx_ctrl->ccb);
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun 	return rcvd;
1882*4882a593Smuzhiyun }
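/*
 * Editor's note: the poll function above follows the standard NAPI
 * contract.  A skeleton of that contract (the example_* names are
 * illustrative, not part of the driver):
 */
#if 0
static int example_poll(struct napi_struct *napi, int budget)
{
	int done = example_process_rx(napi, budget);

	if (done < budget) {
		/* Under budget: report the count and stop polling ... */
		napi_complete_done(napi, done);
		/* ... and only then re-enable the device interrupt. */
		example_enable_rx_irq(napi);
	}
	/* At full budget: return without completing; the core re-polls. */
	return done;
}
#endif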
1883*4882a593Smuzhiyun 
1884*4882a593Smuzhiyun #define BNAD_NAPI_POLL_QUOTA		64
1885*4882a593Smuzhiyun static void
1886*4882a593Smuzhiyun bnad_napi_add(struct bnad *bnad, u32 rx_id)
1887*4882a593Smuzhiyun {
1888*4882a593Smuzhiyun 	struct bnad_rx_ctrl *rx_ctrl;
1889*4882a593Smuzhiyun 	int i;
1890*4882a593Smuzhiyun 
1891*4882a593Smuzhiyun 	/* Initialize NAPI; it is enabled later, from bnad_cb_rx_post() */
1892*4882a593Smuzhiyun 	for (i = 0; i <	bnad->num_rxp_per_rx; i++) {
1893*4882a593Smuzhiyun 		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1894*4882a593Smuzhiyun 		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1895*4882a593Smuzhiyun 			       bnad_napi_poll_rx, BNAD_NAPI_POLL_QUOTA);
1896*4882a593Smuzhiyun 	}
1897*4882a593Smuzhiyun }
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun static void
1900*4882a593Smuzhiyun bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1901*4882a593Smuzhiyun {
1902*4882a593Smuzhiyun 	int i;
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 	/* NAPI was already disabled in bnad_rx_cleanup(); just delete it */
1905*4882a593Smuzhiyun 	for (i = 0; i < bnad->num_rxp_per_rx; i++)
1906*4882a593Smuzhiyun 		netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1907*4882a593Smuzhiyun }
1908*4882a593Smuzhiyun 
1909*4882a593Smuzhiyun /* Should be called with conf_lock held */
1910*4882a593Smuzhiyun void
1911*4882a593Smuzhiyun bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1912*4882a593Smuzhiyun {
1913*4882a593Smuzhiyun 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1914*4882a593Smuzhiyun 	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1915*4882a593Smuzhiyun 	unsigned long flags;
1916*4882a593Smuzhiyun 
1917*4882a593Smuzhiyun 	if (!tx_info->tx)
1918*4882a593Smuzhiyun 		return;
1919*4882a593Smuzhiyun 
1920*4882a593Smuzhiyun 	init_completion(&bnad->bnad_completions.tx_comp);
1921*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1922*4882a593Smuzhiyun 	bna_tx_disable(tx_info->tx, BNA_HARD_CLEANUP, bnad_cb_tx_disabled);
1923*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1924*4882a593Smuzhiyun 	wait_for_completion(&bnad->bnad_completions.tx_comp);
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun 	if (tx_info->tcb[0]->intr_type == BNA_INTR_T_MSIX)
1927*4882a593Smuzhiyun 		bnad_tx_msix_unregister(bnad, tx_info,
1928*4882a593Smuzhiyun 			bnad->num_txq_per_tx);
1929*4882a593Smuzhiyun 
1930*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1931*4882a593Smuzhiyun 	bna_tx_destroy(tx_info->tx);
1932*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1933*4882a593Smuzhiyun 
1934*4882a593Smuzhiyun 	tx_info->tx = NULL;
1935*4882a593Smuzhiyun 	tx_info->tx_id = 0;
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 	bnad_tx_res_free(bnad, res_info);
1938*4882a593Smuzhiyun }
1939*4882a593Smuzhiyun 
1940*4882a593Smuzhiyun /* Should be called with conf_lock held */
1941*4882a593Smuzhiyun int
1942*4882a593Smuzhiyun bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1943*4882a593Smuzhiyun {
1944*4882a593Smuzhiyun 	int err;
1945*4882a593Smuzhiyun 	struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1946*4882a593Smuzhiyun 	struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1947*4882a593Smuzhiyun 	struct bna_intr_info *intr_info =
1948*4882a593Smuzhiyun 			&res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
1949*4882a593Smuzhiyun 	struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1950*4882a593Smuzhiyun 	static const struct bna_tx_event_cbfn tx_cbfn = {
1951*4882a593Smuzhiyun 		.tcb_setup_cbfn = bnad_cb_tcb_setup,
1952*4882a593Smuzhiyun 		.tcb_destroy_cbfn = bnad_cb_tcb_destroy,
1953*4882a593Smuzhiyun 		.tx_stall_cbfn = bnad_cb_tx_stall,
1954*4882a593Smuzhiyun 		.tx_resume_cbfn = bnad_cb_tx_resume,
1955*4882a593Smuzhiyun 		.tx_cleanup_cbfn = bnad_cb_tx_cleanup,
1956*4882a593Smuzhiyun 	};
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun 	struct bna_tx *tx;
1959*4882a593Smuzhiyun 	unsigned long flags;
1960*4882a593Smuzhiyun 
1961*4882a593Smuzhiyun 	tx_info->tx_id = tx_id;
1962*4882a593Smuzhiyun 
1963*4882a593Smuzhiyun 	/* Initialize the Tx object configuration */
1964*4882a593Smuzhiyun 	tx_config->num_txq = bnad->num_txq_per_tx;
1965*4882a593Smuzhiyun 	tx_config->txq_depth = bnad->txq_depth;
1966*4882a593Smuzhiyun 	tx_config->tx_type = BNA_TX_T_REGULAR;
1967*4882a593Smuzhiyun 	tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1968*4882a593Smuzhiyun 
1969*4882a593Smuzhiyun 	/* Get BNA's resource requirement for one tx object */
1970*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1971*4882a593Smuzhiyun 	bna_tx_res_req(bnad->num_txq_per_tx,
1972*4882a593Smuzhiyun 		bnad->txq_depth, res_info);
1973*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	/* Fill Unmap Q memory requirements */
1976*4882a593Smuzhiyun 	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_TX_RES_MEM_T_UNMAPQ],
1977*4882a593Smuzhiyun 			bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1978*4882a593Smuzhiyun 			bnad->txq_depth));
1979*4882a593Smuzhiyun 
1980*4882a593Smuzhiyun 	/* Allocate resources */
1981*4882a593Smuzhiyun 	err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1982*4882a593Smuzhiyun 	if (err)
1983*4882a593Smuzhiyun 		return err;
1984*4882a593Smuzhiyun 
1985*4882a593Smuzhiyun 	/* Ask BNA to create one Tx object, supplying required resources */
1986*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1987*4882a593Smuzhiyun 	tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1988*4882a593Smuzhiyun 			tx_info);
1989*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1990*4882a593Smuzhiyun 	if (!tx) {
1991*4882a593Smuzhiyun 		err = -ENOMEM;
1992*4882a593Smuzhiyun 		goto err_return;
1993*4882a593Smuzhiyun 	}
1994*4882a593Smuzhiyun 	tx_info->tx = tx;
1995*4882a593Smuzhiyun 
1996*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&tx_info->tx_cleanup_work,
1997*4882a593Smuzhiyun 			(work_func_t)bnad_tx_cleanup);
1998*4882a593Smuzhiyun 
1999*4882a593Smuzhiyun 	/* Register ISR for the Tx object */
2000*4882a593Smuzhiyun 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2001*4882a593Smuzhiyun 		err = bnad_tx_msix_register(bnad, tx_info,
2002*4882a593Smuzhiyun 			tx_id, bnad->num_txq_per_tx);
2003*4882a593Smuzhiyun 		if (err)
2004*4882a593Smuzhiyun 			goto cleanup_tx;
2005*4882a593Smuzhiyun 	}
2006*4882a593Smuzhiyun 
2007*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2008*4882a593Smuzhiyun 	bna_tx_enable(tx);
2009*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun 	return 0;
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun cleanup_tx:
2014*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2015*4882a593Smuzhiyun 	bna_tx_destroy(tx_info->tx);
2016*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2017*4882a593Smuzhiyun 	tx_info->tx = NULL;
2018*4882a593Smuzhiyun 	tx_info->tx_id = 0;
2019*4882a593Smuzhiyun err_return:
2020*4882a593Smuzhiyun 	bnad_tx_res_free(bnad, res_info);
2021*4882a593Smuzhiyun 	return err;
2022*4882a593Smuzhiyun }
2023*4882a593Smuzhiyun 
2024*4882a593Smuzhiyun /* Setup the rx config for bna_rx_create */
2025*4882a593Smuzhiyun /* bnad decides the configuration */
2026*4882a593Smuzhiyun static void
2027*4882a593Smuzhiyun bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2028*4882a593Smuzhiyun {
2029*4882a593Smuzhiyun 	memset(rx_config, 0, sizeof(*rx_config));
2030*4882a593Smuzhiyun 	rx_config->rx_type = BNA_RX_T_REGULAR;
2031*4882a593Smuzhiyun 	rx_config->num_paths = bnad->num_rxp_per_rx;
2032*4882a593Smuzhiyun 	rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2033*4882a593Smuzhiyun 
2034*4882a593Smuzhiyun 	if (bnad->num_rxp_per_rx > 1) {
2035*4882a593Smuzhiyun 		rx_config->rss_status = BNA_STATUS_T_ENABLED;
2036*4882a593Smuzhiyun 		rx_config->rss_config.hash_type =
2037*4882a593Smuzhiyun 				(BFI_ENET_RSS_IPV6 |
2038*4882a593Smuzhiyun 				 BFI_ENET_RSS_IPV6_TCP |
2039*4882a593Smuzhiyun 				 BFI_ENET_RSS_IPV4 |
2040*4882a593Smuzhiyun 				 BFI_ENET_RSS_IPV4_TCP);
2041*4882a593Smuzhiyun 		rx_config->rss_config.hash_mask =
2042*4882a593Smuzhiyun 				bnad->num_rxp_per_rx - 1;
2043*4882a593Smuzhiyun 		netdev_rss_key_fill(rx_config->rss_config.toeplitz_hash_key,
2044*4882a593Smuzhiyun 			sizeof(rx_config->rss_config.toeplitz_hash_key));
2045*4882a593Smuzhiyun 	} else {
2046*4882a593Smuzhiyun 		rx_config->rss_status = BNA_STATUS_T_DISABLED;
2047*4882a593Smuzhiyun 		memset(&rx_config->rss_config, 0,
2048*4882a593Smuzhiyun 		       sizeof(rx_config->rss_config));
2049*4882a593Smuzhiyun 	}
2050*4882a593Smuzhiyun 
2051*4882a593Smuzhiyun 	rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2052*4882a593Smuzhiyun 	rx_config->q0_multi_buf = BNA_STATUS_T_DISABLED;
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun 	/* BNA_RXP_SINGLE - one data-buffer queue
2055*4882a593Smuzhiyun 	 * BNA_RXP_SLR - one small-buffer and one large-buffer queues
2056*4882a593Smuzhiyun 	 * BNA_RXP_HDS - one header-buffer and one data-buffer queues
2057*4882a593Smuzhiyun 	 */
2058*4882a593Smuzhiyun 	/* TODO: configurable param for queue type */
2059*4882a593Smuzhiyun 	rx_config->rxp_type = BNA_RXP_SLR;
2060*4882a593Smuzhiyun 
2061*4882a593Smuzhiyun 	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2062*4882a593Smuzhiyun 	    rx_config->frame_size > 4096) {
2063*4882a593Smuzhiyun 		/* Though size_routing_enable is set in SLR mode,
2064*4882a593Smuzhiyun 		 * small packets may still get routed to the same rxq;
2065*4882a593Smuzhiyun 		 * set buf_size to 2048 instead of PAGE_SIZE.
2066*4882a593Smuzhiyun 		 */
2067*4882a593Smuzhiyun 		rx_config->q0_buf_size = 2048;
2068*4882a593Smuzhiyun 		/* this should be in multiples of 2 */
2069*4882a593Smuzhiyun 		/* this should be a multiple of 2 */
2070*4882a593Smuzhiyun 		rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2071*4882a593Smuzhiyun 		rx_config->q0_multi_buf = BNA_STATUS_T_ENABLED;
2072*4882a593Smuzhiyun 	} else {
2073*4882a593Smuzhiyun 		rx_config->q0_buf_size = rx_config->frame_size;
2074*4882a593Smuzhiyun 		rx_config->q0_num_vecs = 1;
2075*4882a593Smuzhiyun 		rx_config->q0_depth = bnad->rxq_depth;
2076*4882a593Smuzhiyun 	}
2077*4882a593Smuzhiyun 
2078*4882a593Smuzhiyun 	/* initialize for q1 for BNA_RXP_SLR/BNA_RXP_HDS */
2079*4882a593Smuzhiyun 	if (rx_config->rxp_type == BNA_RXP_SLR) {
2080*4882a593Smuzhiyun 		rx_config->q1_depth = bnad->rxq_depth;
2081*4882a593Smuzhiyun 		rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
2082*4882a593Smuzhiyun 	}
2083*4882a593Smuzhiyun 
2084*4882a593Smuzhiyun 	rx_config->vlan_strip_status =
2085*4882a593Smuzhiyun 		(bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2086*4882a593Smuzhiyun 		BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
2087*4882a593Smuzhiyun }
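/*
 * Editor's note: a worked example of the multi-buffer branch above,
 * assuming BNAD_PCI_DEV_IS_CAT2() holds and BNAD_FRAME_SIZE(mtu) comes
 * out above 4096 (e.g. a jumbo MTU of 9000; the macro itself is defined
 * elsewhere).  Buffers are then fixed at 2048 bytes and posted in groups
 * of q0_num_vecs = 4, and with an assumed rxq_depth of 2048 the data
 * queue is scaled to q0_depth = 2048 * 4 = 8192 buffers, so it still
 * holds the same number of frames as the single-buffer layout.
 */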
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun static void
2090*4882a593Smuzhiyun bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2091*4882a593Smuzhiyun {
2092*4882a593Smuzhiyun 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2093*4882a593Smuzhiyun 	int i;
2094*4882a593Smuzhiyun 
2095*4882a593Smuzhiyun 	for (i = 0; i < bnad->num_rxp_per_rx; i++)
2096*4882a593Smuzhiyun 		rx_info->rx_ctrl[i].bnad = bnad;
2097*4882a593Smuzhiyun }
2098*4882a593Smuzhiyun 
2099*4882a593Smuzhiyun /* Called with mutex_lock(&bnad->conf_mutex) held */
2100*4882a593Smuzhiyun static u32
2101*4882a593Smuzhiyun bnad_reinit_rx(struct bnad *bnad)
2102*4882a593Smuzhiyun {
2103*4882a593Smuzhiyun 	struct net_device *netdev = bnad->netdev;
2104*4882a593Smuzhiyun 	u32 err = 0, current_err = 0;
2105*4882a593Smuzhiyun 	u32 rx_id = 0, count = 0;
2106*4882a593Smuzhiyun 	unsigned long flags;
2107*4882a593Smuzhiyun 
2108*4882a593Smuzhiyun 	/* destroy and create new rx objects */
2109*4882a593Smuzhiyun 	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2110*4882a593Smuzhiyun 		if (!bnad->rx_info[rx_id].rx)
2111*4882a593Smuzhiyun 			continue;
2112*4882a593Smuzhiyun 		bnad_destroy_rx(bnad, rx_id);
2113*4882a593Smuzhiyun 	}
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2116*4882a593Smuzhiyun 	bna_enet_mtu_set(&bnad->bna.enet,
2117*4882a593Smuzhiyun 			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2118*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 	for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2121*4882a593Smuzhiyun 		count++;
2122*4882a593Smuzhiyun 		current_err = bnad_setup_rx(bnad, rx_id);
2123*4882a593Smuzhiyun 		if (current_err && !err) {
2124*4882a593Smuzhiyun 			err = current_err;
2125*4882a593Smuzhiyun 			netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
2126*4882a593Smuzhiyun 		}
2127*4882a593Smuzhiyun 	}
2128*4882a593Smuzhiyun 
2129*4882a593Smuzhiyun 	/* restore rx configuration */
2130*4882a593Smuzhiyun 	if (bnad->rx_info[0].rx && !err) {
2131*4882a593Smuzhiyun 		bnad_restore_vlans(bnad, 0);
2132*4882a593Smuzhiyun 		bnad_enable_default_bcast(bnad);
2133*4882a593Smuzhiyun 		spin_lock_irqsave(&bnad->bna_lock, flags);
2134*4882a593Smuzhiyun 		bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2135*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2136*4882a593Smuzhiyun 		bnad_set_rx_mode(netdev);
2137*4882a593Smuzhiyun 	}
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun 	return count;
2140*4882a593Smuzhiyun }
2141*4882a593Smuzhiyun 
2142*4882a593Smuzhiyun /* Called with bnad_conf_lock() held */
2143*4882a593Smuzhiyun void
2144*4882a593Smuzhiyun bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2145*4882a593Smuzhiyun {
2146*4882a593Smuzhiyun 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2147*4882a593Smuzhiyun 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2148*4882a593Smuzhiyun 	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2149*4882a593Smuzhiyun 	unsigned long flags;
2150*4882a593Smuzhiyun 	int to_del = 0;
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun 	if (!rx_info->rx)
2153*4882a593Smuzhiyun 		return;
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun 	if (0 == rx_id) {
2156*4882a593Smuzhiyun 		spin_lock_irqsave(&bnad->bna_lock, flags);
2157*4882a593Smuzhiyun 		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2158*4882a593Smuzhiyun 		    test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2159*4882a593Smuzhiyun 			clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2160*4882a593Smuzhiyun 			to_del = 1;
2161*4882a593Smuzhiyun 		}
2162*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2163*4882a593Smuzhiyun 		if (to_del)
2164*4882a593Smuzhiyun 			del_timer_sync(&bnad->dim_timer);
2165*4882a593Smuzhiyun 	}
2166*4882a593Smuzhiyun 
2167*4882a593Smuzhiyun 	init_completion(&bnad->bnad_completions.rx_comp);
2168*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2169*4882a593Smuzhiyun 	bna_rx_disable(rx_info->rx, BNA_HARD_CLEANUP, bnad_cb_rx_disabled);
2170*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2171*4882a593Smuzhiyun 	wait_for_completion(&bnad->bnad_completions.rx_comp);
2172*4882a593Smuzhiyun 
2173*4882a593Smuzhiyun 	if (rx_info->rx_ctrl[0].ccb->intr_type == BNA_INTR_T_MSIX)
2174*4882a593Smuzhiyun 		bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2175*4882a593Smuzhiyun 
2176*4882a593Smuzhiyun 	bnad_napi_delete(bnad, rx_id);
2177*4882a593Smuzhiyun 
2178*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2179*4882a593Smuzhiyun 	bna_rx_destroy(rx_info->rx);
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun 	rx_info->rx = NULL;
2182*4882a593Smuzhiyun 	rx_info->rx_id = 0;
2183*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2184*4882a593Smuzhiyun 
2185*4882a593Smuzhiyun 	bnad_rx_res_free(bnad, res_info);
2186*4882a593Smuzhiyun }
2187*4882a593Smuzhiyun 
2188*4882a593Smuzhiyun /* Called with mutex_lock(&bnad->conf_mutex) held */
2189*4882a593Smuzhiyun int
2190*4882a593Smuzhiyun bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2191*4882a593Smuzhiyun {
2192*4882a593Smuzhiyun 	int err;
2193*4882a593Smuzhiyun 	struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2194*4882a593Smuzhiyun 	struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2195*4882a593Smuzhiyun 	struct bna_intr_info *intr_info =
2196*4882a593Smuzhiyun 			&res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
2197*4882a593Smuzhiyun 	struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2198*4882a593Smuzhiyun 	static const struct bna_rx_event_cbfn rx_cbfn = {
2199*4882a593Smuzhiyun 		.rcb_setup_cbfn = NULL,
2200*4882a593Smuzhiyun 		.rcb_destroy_cbfn = NULL,
2201*4882a593Smuzhiyun 		.ccb_setup_cbfn = bnad_cb_ccb_setup,
2202*4882a593Smuzhiyun 		.ccb_destroy_cbfn = bnad_cb_ccb_destroy,
2203*4882a593Smuzhiyun 		.rx_stall_cbfn = bnad_cb_rx_stall,
2204*4882a593Smuzhiyun 		.rx_cleanup_cbfn = bnad_cb_rx_cleanup,
2205*4882a593Smuzhiyun 		.rx_post_cbfn = bnad_cb_rx_post,
2206*4882a593Smuzhiyun 	};
2207*4882a593Smuzhiyun 	struct bna_rx *rx;
2208*4882a593Smuzhiyun 	unsigned long flags;
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun 	rx_info->rx_id = rx_id;
2211*4882a593Smuzhiyun 
2212*4882a593Smuzhiyun 	/* Initialize the Rx object configuration */
2213*4882a593Smuzhiyun 	bnad_init_rx_config(bnad, rx_config);
2214*4882a593Smuzhiyun 
2215*4882a593Smuzhiyun 	/* Get BNA's resource requirement for one Rx object */
2216*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2217*4882a593Smuzhiyun 	bna_rx_res_req(rx_config, res_info);
2218*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun 	/* Fill Unmap Q memory requirements */
2221*4882a593Smuzhiyun 	BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPDQ],
2222*4882a593Smuzhiyun 				 rx_config->num_paths,
2223*4882a593Smuzhiyun 			(rx_config->q0_depth *
2224*4882a593Smuzhiyun 			 sizeof(struct bnad_rx_unmap)) +
2225*4882a593Smuzhiyun 			 sizeof(struct bnad_rx_unmap_q));
2226*4882a593Smuzhiyun 
2227*4882a593Smuzhiyun 	if (rx_config->rxp_type != BNA_RXP_SINGLE) {
2228*4882a593Smuzhiyun 		BNAD_FILL_UNMAPQ_MEM_REQ(&res_info[BNA_RX_RES_MEM_T_UNMAPHQ],
2229*4882a593Smuzhiyun 					 rx_config->num_paths,
2230*4882a593Smuzhiyun 				(rx_config->q1_depth *
2231*4882a593Smuzhiyun 				 sizeof(struct bnad_rx_unmap) +
2232*4882a593Smuzhiyun 				 sizeof(struct bnad_rx_unmap_q)));
2233*4882a593Smuzhiyun 	}
2234*4882a593Smuzhiyun 	/* Allocate resources */
2235*4882a593Smuzhiyun 	err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2236*4882a593Smuzhiyun 	if (err)
2237*4882a593Smuzhiyun 		return err;
2238*4882a593Smuzhiyun 
2239*4882a593Smuzhiyun 	bnad_rx_ctrl_init(bnad, rx_id);
2240*4882a593Smuzhiyun 
2241*4882a593Smuzhiyun 	/* Ask BNA to create one Rx object, supplying required resources */
2242*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2243*4882a593Smuzhiyun 	rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2244*4882a593Smuzhiyun 			rx_info);
2245*4882a593Smuzhiyun 	if (!rx) {
2246*4882a593Smuzhiyun 		err = -ENOMEM;
2247*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2248*4882a593Smuzhiyun 		goto err_return;
2249*4882a593Smuzhiyun 	}
2250*4882a593Smuzhiyun 	rx_info->rx = rx;
2251*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 	INIT_WORK(&rx_info->rx_cleanup_work,
2254*4882a593Smuzhiyun 			(work_func_t)(bnad_rx_cleanup));
2255*4882a593Smuzhiyun 
2256*4882a593Smuzhiyun 	/*
2257*4882a593Smuzhiyun 	 * Init NAPI, so that state is set to NAPI_STATE_SCHED,
2258*4882a593Smuzhiyun 	 * ensuring the IRQ handler cannot schedule NAPI at this point.
2259*4882a593Smuzhiyun 	 */
2260*4882a593Smuzhiyun 	bnad_napi_add(bnad, rx_id);
2261*4882a593Smuzhiyun 
2262*4882a593Smuzhiyun 	/* Register ISR for the Rx object */
2263*4882a593Smuzhiyun 	if (intr_info->intr_type == BNA_INTR_T_MSIX) {
2264*4882a593Smuzhiyun 		err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2265*4882a593Smuzhiyun 						rx_config->num_paths);
2266*4882a593Smuzhiyun 		if (err)
2267*4882a593Smuzhiyun 			goto err_return;
2268*4882a593Smuzhiyun 	}
2269*4882a593Smuzhiyun 
2270*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2271*4882a593Smuzhiyun 	if (0 == rx_id) {
2272*4882a593Smuzhiyun 		/* Set up Dynamic Interrupt Moderation Vector */
2273*4882a593Smuzhiyun 		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2274*4882a593Smuzhiyun 			bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2275*4882a593Smuzhiyun 
2276*4882a593Smuzhiyun 		/* Enable VLAN filtering only on the default Rx */
2277*4882a593Smuzhiyun 		bna_rx_vlanfilter_enable(rx);
2278*4882a593Smuzhiyun 
2279*4882a593Smuzhiyun 		/* Start the DIM timer */
2280*4882a593Smuzhiyun 		bnad_dim_timer_start(bnad);
2281*4882a593Smuzhiyun 	}
2282*4882a593Smuzhiyun 
2283*4882a593Smuzhiyun 	bna_rx_enable(rx);
2284*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2285*4882a593Smuzhiyun 
2286*4882a593Smuzhiyun 	return 0;
2287*4882a593Smuzhiyun 
2288*4882a593Smuzhiyun err_return:
2289*4882a593Smuzhiyun 	bnad_destroy_rx(bnad, rx_id);
2290*4882a593Smuzhiyun 	return err;
2291*4882a593Smuzhiyun }
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun /* Called with conf_lock & bnad->bna_lock held */
2294*4882a593Smuzhiyun void
2295*4882a593Smuzhiyun bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2296*4882a593Smuzhiyun {
2297*4882a593Smuzhiyun 	struct bnad_tx_info *tx_info;
2298*4882a593Smuzhiyun 
2299*4882a593Smuzhiyun 	tx_info = &bnad->tx_info[0];
2300*4882a593Smuzhiyun 	if (!tx_info->tx)
2301*4882a593Smuzhiyun 		return;
2302*4882a593Smuzhiyun 
2303*4882a593Smuzhiyun 	bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2304*4882a593Smuzhiyun }
2305*4882a593Smuzhiyun 
2306*4882a593Smuzhiyun /* Called with conf_lock & bnad->bna_lock held */
2307*4882a593Smuzhiyun void
2308*4882a593Smuzhiyun bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2309*4882a593Smuzhiyun {
2310*4882a593Smuzhiyun 	struct bnad_rx_info *rx_info;
2311*4882a593Smuzhiyun 	int	i;
2312*4882a593Smuzhiyun 
2313*4882a593Smuzhiyun 	for (i = 0; i < bnad->num_rx; i++) {
2314*4882a593Smuzhiyun 		rx_info = &bnad->rx_info[i];
2315*4882a593Smuzhiyun 		if (!rx_info->rx)
2316*4882a593Smuzhiyun 			continue;
2317*4882a593Smuzhiyun 		bna_rx_coalescing_timeo_set(rx_info->rx,
2318*4882a593Smuzhiyun 				bnad->rx_coalescing_timeo);
2319*4882a593Smuzhiyun 	}
2320*4882a593Smuzhiyun }
2321*4882a593Smuzhiyun 
2322*4882a593Smuzhiyun /*
2323*4882a593Smuzhiyun  * Called with bnad->bna_lock held
2324*4882a593Smuzhiyun  */
2325*4882a593Smuzhiyun int
2326*4882a593Smuzhiyun bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2327*4882a593Smuzhiyun {
2328*4882a593Smuzhiyun 	int ret;
2329*4882a593Smuzhiyun 
2330*4882a593Smuzhiyun 	if (!is_valid_ether_addr(mac_addr))
2331*4882a593Smuzhiyun 		return -EADDRNOTAVAIL;
2332*4882a593Smuzhiyun 
2333*4882a593Smuzhiyun 	/* If datapath is down, pretend everything went through */
2334*4882a593Smuzhiyun 	if (!bnad->rx_info[0].rx)
2335*4882a593Smuzhiyun 		return 0;
2336*4882a593Smuzhiyun 
2337*4882a593Smuzhiyun 	ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2338*4882a593Smuzhiyun 	if (ret != BNA_CB_SUCCESS)
2339*4882a593Smuzhiyun 		return -EADDRNOTAVAIL;
2340*4882a593Smuzhiyun 
2341*4882a593Smuzhiyun 	return 0;
2342*4882a593Smuzhiyun }
2343*4882a593Smuzhiyun 
2344*4882a593Smuzhiyun /* Should be called with conf_lock held */
2345*4882a593Smuzhiyun int
2346*4882a593Smuzhiyun bnad_enable_default_bcast(struct bnad *bnad)
2347*4882a593Smuzhiyun {
2348*4882a593Smuzhiyun 	struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2349*4882a593Smuzhiyun 	int ret;
2350*4882a593Smuzhiyun 	unsigned long flags;
2351*4882a593Smuzhiyun 
2352*4882a593Smuzhiyun 	init_completion(&bnad->bnad_completions.mcast_comp);
2353*4882a593Smuzhiyun 
2354*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2355*4882a593Smuzhiyun 	ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
2356*4882a593Smuzhiyun 			       bnad_cb_rx_mcast_add);
2357*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2358*4882a593Smuzhiyun 
2359*4882a593Smuzhiyun 	if (ret == BNA_CB_SUCCESS)
2360*4882a593Smuzhiyun 		wait_for_completion(&bnad->bnad_completions.mcast_comp);
2361*4882a593Smuzhiyun 	else
2362*4882a593Smuzhiyun 		return -ENODEV;
2363*4882a593Smuzhiyun 
2364*4882a593Smuzhiyun 	if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2365*4882a593Smuzhiyun 		return -ENODEV;
2366*4882a593Smuzhiyun 
2367*4882a593Smuzhiyun 	return 0;
2368*4882a593Smuzhiyun }
2369*4882a593Smuzhiyun 
2370*4882a593Smuzhiyun /* Called with mutex_lock(&bnad->conf_mutex) held */
2371*4882a593Smuzhiyun void
2372*4882a593Smuzhiyun bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2373*4882a593Smuzhiyun {
2374*4882a593Smuzhiyun 	u16 vid;
2375*4882a593Smuzhiyun 	unsigned long flags;
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun 	for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2378*4882a593Smuzhiyun 		spin_lock_irqsave(&bnad->bna_lock, flags);
2379*4882a593Smuzhiyun 		bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2380*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2381*4882a593Smuzhiyun 	}
2382*4882a593Smuzhiyun }
2383*4882a593Smuzhiyun 
2384*4882a593Smuzhiyun /* Statistics utilities */
2385*4882a593Smuzhiyun void
2386*4882a593Smuzhiyun bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2387*4882a593Smuzhiyun {
2388*4882a593Smuzhiyun 	int i, j;
2389*4882a593Smuzhiyun 
2390*4882a593Smuzhiyun 	for (i = 0; i < bnad->num_rx; i++) {
2391*4882a593Smuzhiyun 		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2392*4882a593Smuzhiyun 			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2393*4882a593Smuzhiyun 				stats->rx_packets += bnad->rx_info[i].
2394*4882a593Smuzhiyun 				rx_ctrl[j].ccb->rcb[0]->rxq->rx_packets;
2395*4882a593Smuzhiyun 				stats->rx_bytes += bnad->rx_info[i].
2396*4882a593Smuzhiyun 					rx_ctrl[j].ccb->rcb[0]->rxq->rx_bytes;
2397*4882a593Smuzhiyun 				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2398*4882a593Smuzhiyun 					bnad->rx_info[i].rx_ctrl[j].ccb->
2399*4882a593Smuzhiyun 					rcb[1]->rxq) {
2400*4882a593Smuzhiyun 					stats->rx_packets +=
2401*4882a593Smuzhiyun 						bnad->rx_info[i].rx_ctrl[j].
2402*4882a593Smuzhiyun 						ccb->rcb[1]->rxq->rx_packets;
2403*4882a593Smuzhiyun 					stats->rx_bytes +=
2404*4882a593Smuzhiyun 						bnad->rx_info[i].rx_ctrl[j].
2405*4882a593Smuzhiyun 						ccb->rcb[1]->rxq->rx_bytes;
2406*4882a593Smuzhiyun 				}
2407*4882a593Smuzhiyun 			}
2408*4882a593Smuzhiyun 		}
2409*4882a593Smuzhiyun 	}
2410*4882a593Smuzhiyun 	for (i = 0; i < bnad->num_tx; i++) {
2411*4882a593Smuzhiyun 		for (j = 0; j < bnad->num_txq_per_tx; j++) {
2412*4882a593Smuzhiyun 			if (bnad->tx_info[i].tcb[j]) {
2413*4882a593Smuzhiyun 				stats->tx_packets +=
2414*4882a593Smuzhiyun 				bnad->tx_info[i].tcb[j]->txq->tx_packets;
2415*4882a593Smuzhiyun 				stats->tx_bytes +=
2416*4882a593Smuzhiyun 					bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2417*4882a593Smuzhiyun 			}
2418*4882a593Smuzhiyun 		}
2419*4882a593Smuzhiyun 	}
2420*4882a593Smuzhiyun }
2421*4882a593Smuzhiyun 
2422*4882a593Smuzhiyun /*
2423*4882a593Smuzhiyun  * Must be called with the bna_lock held.
2424*4882a593Smuzhiyun  */
2425*4882a593Smuzhiyun void
2426*4882a593Smuzhiyun bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2427*4882a593Smuzhiyun {
2428*4882a593Smuzhiyun 	struct bfi_enet_stats_mac *mac_stats;
2429*4882a593Smuzhiyun 	u32 bmap;
2430*4882a593Smuzhiyun 	int i;
2431*4882a593Smuzhiyun 
2432*4882a593Smuzhiyun 	mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2433*4882a593Smuzhiyun 	stats->rx_errors =
2434*4882a593Smuzhiyun 		mac_stats->rx_fcs_error + mac_stats->rx_alignment_error +
2435*4882a593Smuzhiyun 		mac_stats->rx_frame_length_error + mac_stats->rx_code_error +
2436*4882a593Smuzhiyun 		mac_stats->rx_undersize;
2437*4882a593Smuzhiyun 	stats->tx_errors = mac_stats->tx_fcs_error +
2438*4882a593Smuzhiyun 					mac_stats->tx_undersize;
2439*4882a593Smuzhiyun 	stats->rx_dropped = mac_stats->rx_drop;
2440*4882a593Smuzhiyun 	stats->tx_dropped = mac_stats->tx_drop;
2441*4882a593Smuzhiyun 	stats->multicast = mac_stats->rx_multicast;
2442*4882a593Smuzhiyun 	stats->collisions = mac_stats->tx_total_collision;
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun 	stats->rx_length_errors = mac_stats->rx_frame_length_error;
2445*4882a593Smuzhiyun 
2446*4882a593Smuzhiyun 	/* receive ring buffer overflow: not reported; no matching MAC counter */
2447*4882a593Smuzhiyun 
2448*4882a593Smuzhiyun 	stats->rx_crc_errors = mac_stats->rx_fcs_error;
2449*4882a593Smuzhiyun 	stats->rx_frame_errors = mac_stats->rx_alignment_error;
2450*4882a593Smuzhiyun 	/* receiver FIFO overrun: count frame drops of the first enabled rxf */
2451*4882a593Smuzhiyun 	bmap = bna_rx_rid_mask(&bnad->bna);
2452*4882a593Smuzhiyun 	for (i = 0; bmap; i++) {
2453*4882a593Smuzhiyun 		if (bmap & 1) {
2454*4882a593Smuzhiyun 			stats->rx_fifo_errors +=
2455*4882a593Smuzhiyun 				bnad->stats.bna_stats->
2456*4882a593Smuzhiyun 					hw_stats.rxf_stats[i].frame_drops;
2457*4882a593Smuzhiyun 			break;
2458*4882a593Smuzhiyun 		}
2459*4882a593Smuzhiyun 		bmap >>= 1;
2460*4882a593Smuzhiyun 	}
2461*4882a593Smuzhiyun }
2462*4882a593Smuzhiyun 
2463*4882a593Smuzhiyun static void
2464*4882a593Smuzhiyun bnad_mbox_irq_sync(struct bnad *bnad)
2465*4882a593Smuzhiyun {
2466*4882a593Smuzhiyun 	u32 irq;
2467*4882a593Smuzhiyun 	unsigned long flags;
2468*4882a593Smuzhiyun 
2469*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2470*4882a593Smuzhiyun 	if (bnad->cfg_flags & BNAD_CF_MSIX)
2471*4882a593Smuzhiyun 		irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2472*4882a593Smuzhiyun 	else
2473*4882a593Smuzhiyun 		irq = bnad->pcidev->irq;
2474*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2475*4882a593Smuzhiyun 
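	/* Wait for any in-flight mailbox interrupt handler to finish */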
2476*4882a593Smuzhiyun 	synchronize_irq(irq);
2477*4882a593Smuzhiyun }
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun /* Utility used by bnad_start_xmit, for doing TSO */
2480*4882a593Smuzhiyun static int
2481*4882a593Smuzhiyun bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2482*4882a593Smuzhiyun {
2483*4882a593Smuzhiyun 	int err;
2484*4882a593Smuzhiyun 
2485*4882a593Smuzhiyun 	err = skb_cow_head(skb, 0);
2486*4882a593Smuzhiyun 	if (err < 0) {
2487*4882a593Smuzhiyun 		BNAD_UPDATE_CTR(bnad, tso_err);
2488*4882a593Smuzhiyun 		return err;
2489*4882a593Smuzhiyun 	}
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun 	/*
2492*4882a593Smuzhiyun 	 * For TSO, the TCP checksum field is seeded with pseudo-header sum
2493*4882a593Smuzhiyun 	 * excluding the length field.
2494*4882a593Smuzhiyun 	 */
2495*4882a593Smuzhiyun 	if (vlan_get_protocol(skb) == htons(ETH_P_IP)) {
2496*4882a593Smuzhiyun 		struct iphdr *iph = ip_hdr(skb);
2497*4882a593Smuzhiyun 
2498*4882a593Smuzhiyun 		/* Clear the fields the hardware recomputes per TSO segment */
2499*4882a593Smuzhiyun 		iph->tot_len = 0;
2500*4882a593Smuzhiyun 		iph->check = 0;
2501*4882a593Smuzhiyun 
2502*4882a593Smuzhiyun 		tcp_hdr(skb)->check =
2503*4882a593Smuzhiyun 			~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
2504*4882a593Smuzhiyun 					   IPPROTO_TCP, 0);
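		/*
		 * csum_tcpudp_magic() with len == 0 yields the folded
		 * pseudo-header sum over saddr/daddr/protocol only;
		 * seeding tcp->check with its complement lets the
		 * hardware fold in each segment's real length and
		 * payload later.
		 */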
2505*4882a593Smuzhiyun 		BNAD_UPDATE_CTR(bnad, tso4);
2506*4882a593Smuzhiyun 	} else {
2507*4882a593Smuzhiyun 		tcp_v6_gso_csum_prep(skb);
2508*4882a593Smuzhiyun 		BNAD_UPDATE_CTR(bnad, tso6);
2509*4882a593Smuzhiyun 	}
2510*4882a593Smuzhiyun 
2511*4882a593Smuzhiyun 	return 0;
2512*4882a593Smuzhiyun }
2513*4882a593Smuzhiyun 
2514*4882a593Smuzhiyun /*
2515*4882a593Smuzhiyun  * Initialize Q numbers depending on Rx Paths
2516*4882a593Smuzhiyun  * Called with bnad->bna_lock held, because of cfg_flags
2517*4882a593Smuzhiyun  * access.
2518*4882a593Smuzhiyun  */
2519*4882a593Smuzhiyun static void
2520*4882a593Smuzhiyun bnad_q_num_init(struct bnad *bnad)
2521*4882a593Smuzhiyun {
2522*4882a593Smuzhiyun 	int rxps;
2523*4882a593Smuzhiyun 
2524*4882a593Smuzhiyun 	rxps = min((uint)num_online_cpus(),
2525*4882a593Smuzhiyun 			(uint)(BNAD_MAX_RX * BNAD_MAX_RXP_PER_RX));
2526*4882a593Smuzhiyun 
2527*4882a593Smuzhiyun 	if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2528*4882a593Smuzhiyun 		rxps = 1;	/* INTx */
2529*4882a593Smuzhiyun 
2530*4882a593Smuzhiyun 	bnad->num_rx = 1;
2531*4882a593Smuzhiyun 	bnad->num_tx = 1;
2532*4882a593Smuzhiyun 	bnad->num_rxp_per_rx = rxps;
2533*4882a593Smuzhiyun 	bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2534*4882a593Smuzhiyun }
2535*4882a593Smuzhiyun 
2536*4882a593Smuzhiyun /*
2537*4882a593Smuzhiyun  * Adjusts the Q numbers, given a number of MSI-X vectors.
2538*4882a593Smuzhiyun  * Gives preference to RSS over Tx priority queues; in that
2539*4882a593Smuzhiyun  * case only 1 Tx Q is used.
2540*4882a593Smuzhiyun  * Called with bnad->bna_lock held because of cfg_flags access.
2541*4882a593Smuzhiyun  */
2542*4882a593Smuzhiyun static void
2543*4882a593Smuzhiyun bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2544*4882a593Smuzhiyun {
2545*4882a593Smuzhiyun 	bnad->num_txq_per_tx = 1;
2546*4882a593Smuzhiyun 	if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx)  +
2547*4882a593Smuzhiyun 	     bnad_rxqs_per_cq + BNAD_MAILBOX_MSIX_VECTORS) &&
2548*4882a593Smuzhiyun 	    (bnad->cfg_flags & BNAD_CF_MSIX)) {
2549*4882a593Smuzhiyun 		bnad->num_rxp_per_rx = msix_vectors -
2550*4882a593Smuzhiyun 			(bnad->num_tx * bnad->num_txq_per_tx) -
2551*4882a593Smuzhiyun 			BNAD_MAILBOX_MSIX_VECTORS;
2552*4882a593Smuzhiyun 	} else {
2553*4882a593Smuzhiyun 		bnad->num_rxp_per_rx = 1;
	}
2554*4882a593Smuzhiyun }
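/*
 * Worked example (assuming BNAD_MAILBOX_MSIX_VECTORS == 1): with
 * msix_vectors == 8, num_tx == 1 and num_txq_per_tx forced to 1,
 * num_rxp_per_rx becomes 8 - 1 - 1 = 6; with too few vectors the
 * function falls back to a single Rx path.
 */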
2555*4882a593Smuzhiyun 
2556*4882a593Smuzhiyun /* Enable / disable ioceth */
2557*4882a593Smuzhiyun static int
2558*4882a593Smuzhiyun bnad_ioceth_disable(struct bnad *bnad)
2559*4882a593Smuzhiyun {
2560*4882a593Smuzhiyun 	unsigned long flags;
2561*4882a593Smuzhiyun 	int err = 0;
2562*4882a593Smuzhiyun 
2563*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2564*4882a593Smuzhiyun 	init_completion(&bnad->bnad_completions.ioc_comp);
2565*4882a593Smuzhiyun 	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2566*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2567*4882a593Smuzhiyun 
2568*4882a593Smuzhiyun 	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2569*4882a593Smuzhiyun 		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
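	/*
	 * The wait_for_completion_timeout() return value is ignored; after
	 * a timeout, err below reflects whatever ioc_comp_status already
	 * holds rather than a dedicated timeout error.
	 */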
2570*4882a593Smuzhiyun 
2571*4882a593Smuzhiyun 	err = bnad->bnad_completions.ioc_comp_status;
2572*4882a593Smuzhiyun 	return err;
2573*4882a593Smuzhiyun }
2574*4882a593Smuzhiyun 
2575*4882a593Smuzhiyun static int
2576*4882a593Smuzhiyun bnad_ioceth_enable(struct bnad *bnad)
2577*4882a593Smuzhiyun {
2578*4882a593Smuzhiyun 	int err = 0;
2579*4882a593Smuzhiyun 	unsigned long flags;
2580*4882a593Smuzhiyun 
2581*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2582*4882a593Smuzhiyun 	init_completion(&bnad->bnad_completions.ioc_comp);
2583*4882a593Smuzhiyun 	bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2584*4882a593Smuzhiyun 	bna_ioceth_enable(&bnad->bna.ioceth);
2585*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2586*4882a593Smuzhiyun 
2587*4882a593Smuzhiyun 	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2588*4882a593Smuzhiyun 		msecs_to_jiffies(BNAD_IOCETH_TIMEOUT));
2589*4882a593Smuzhiyun 
2590*4882a593Smuzhiyun 	err = bnad->bnad_completions.ioc_comp_status;
2591*4882a593Smuzhiyun 
2592*4882a593Smuzhiyun 	return err;
2593*4882a593Smuzhiyun }
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun /* Free BNA resources */
2596*4882a593Smuzhiyun static void
2597*4882a593Smuzhiyun bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2598*4882a593Smuzhiyun 		u32 res_val_max)
2599*4882a593Smuzhiyun {
2600*4882a593Smuzhiyun 	int i;
2601*4882a593Smuzhiyun 
2602*4882a593Smuzhiyun 	for (i = 0; i < res_val_max; i++)
2603*4882a593Smuzhiyun 		bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2604*4882a593Smuzhiyun }
2605*4882a593Smuzhiyun 
2606*4882a593Smuzhiyun /* Allocates memory and interrupt resources for BNA */
2607*4882a593Smuzhiyun static int
2608*4882a593Smuzhiyun bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2609*4882a593Smuzhiyun 		u32 res_val_max)
2610*4882a593Smuzhiyun {
2611*4882a593Smuzhiyun 	int i, err;
2612*4882a593Smuzhiyun 
2613*4882a593Smuzhiyun 	for (i = 0; i < res_val_max; i++) {
2614*4882a593Smuzhiyun 		err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2615*4882a593Smuzhiyun 		if (err)
2616*4882a593Smuzhiyun 			goto err_return;
2617*4882a593Smuzhiyun 	}
2618*4882a593Smuzhiyun 	return 0;
2619*4882a593Smuzhiyun 
2620*4882a593Smuzhiyun err_return:
2621*4882a593Smuzhiyun 	bnad_res_free(bnad, res_info, res_val_max);
2622*4882a593Smuzhiyun 	return err;
2623*4882a593Smuzhiyun }
2624*4882a593Smuzhiyun 
2625*4882a593Smuzhiyun /* Interrupt enable / disable */
2626*4882a593Smuzhiyun static void
2627*4882a593Smuzhiyun bnad_enable_msix(struct bnad *bnad)
2628*4882a593Smuzhiyun {
2629*4882a593Smuzhiyun 	int i, ret;
2630*4882a593Smuzhiyun 	unsigned long flags;
2631*4882a593Smuzhiyun 
2632*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2633*4882a593Smuzhiyun 	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2634*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2635*4882a593Smuzhiyun 		return;
2636*4882a593Smuzhiyun 	}
2637*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2638*4882a593Smuzhiyun 
2639*4882a593Smuzhiyun 	if (bnad->msix_table)
2640*4882a593Smuzhiyun 		return;
2641*4882a593Smuzhiyun 
2642*4882a593Smuzhiyun 	bnad->msix_table =
2643*4882a593Smuzhiyun 		kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2644*4882a593Smuzhiyun 
2645*4882a593Smuzhiyun 	if (!bnad->msix_table)
2646*4882a593Smuzhiyun 		goto intx_mode;
2647*4882a593Smuzhiyun 
2648*4882a593Smuzhiyun 	for (i = 0; i < bnad->msix_num; i++)
2649*4882a593Smuzhiyun 		bnad->msix_table[i].entry = i;
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun 	ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2652*4882a593Smuzhiyun 				    1, bnad->msix_num);
2653*4882a593Smuzhiyun 	if (ret < 0) {
2654*4882a593Smuzhiyun 		goto intx_mode;
2655*4882a593Smuzhiyun 	} else if (ret < bnad->msix_num) {
2656*4882a593Smuzhiyun 		dev_warn(&bnad->pcidev->dev,
2657*4882a593Smuzhiyun 			 "%d MSI-X vectors allocated < %d requested\n",
2658*4882a593Smuzhiyun 			 ret, bnad->msix_num);
2659*4882a593Smuzhiyun 
2660*4882a593Smuzhiyun 		spin_lock_irqsave(&bnad->bna_lock, flags);
2661*4882a593Smuzhiyun 		/* ret = #of vectors that we got */
2662*4882a593Smuzhiyun 		bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2663*4882a593Smuzhiyun 			(ret - BNAD_MAILBOX_MSIX_VECTORS) / 2);
2664*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
2665*4882a593Smuzhiyun 
2666*4882a593Smuzhiyun 		bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2667*4882a593Smuzhiyun 			 BNAD_MAILBOX_MSIX_VECTORS;
2668*4882a593Smuzhiyun 
2669*4882a593Smuzhiyun 		if (bnad->msix_num > ret) {
2670*4882a593Smuzhiyun 			pci_disable_msix(bnad->pcidev);
2671*4882a593Smuzhiyun 			goto intx_mode;
2672*4882a593Smuzhiyun 		}
2673*4882a593Smuzhiyun 	}
2674*4882a593Smuzhiyun 
2675*4882a593Smuzhiyun 	pci_intx(bnad->pcidev, 0);
2676*4882a593Smuzhiyun 
2677*4882a593Smuzhiyun 	return;
2678*4882a593Smuzhiyun 
2679*4882a593Smuzhiyun intx_mode:
2680*4882a593Smuzhiyun 	dev_warn(&bnad->pcidev->dev,
2681*4882a593Smuzhiyun 		 "MSI-X enable failed - operating in INTx mode\n");
2682*4882a593Smuzhiyun 
2683*4882a593Smuzhiyun 	kfree(bnad->msix_table);
2684*4882a593Smuzhiyun 	bnad->msix_table = NULL;
2685*4882a593Smuzhiyun 	bnad->msix_num = 0;
2686*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2687*4882a593Smuzhiyun 	bnad->cfg_flags &= ~BNAD_CF_MSIX;
2688*4882a593Smuzhiyun 	bnad_q_num_init(bnad);
2689*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2690*4882a593Smuzhiyun }
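/*
 * On newer kernels the allocate-then-shrink sequence above is commonly
 * expressed with pci_alloc_irq_vectors(); a minimal sketch, not this
 * driver's code:
 *
 *	int nvec = pci_alloc_irq_vectors(bnad->pcidev, 1, bnad->msix_num,
 *					 PCI_IRQ_MSIX | PCI_IRQ_LEGACY);
 *	if (nvec < 0)
 *		return nvec;
 *
 * which hides the msix_entry table and the INTx fallback bookkeeping.
 */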
2691*4882a593Smuzhiyun 
2692*4882a593Smuzhiyun static void
2693*4882a593Smuzhiyun bnad_disable_msix(struct bnad *bnad)
2694*4882a593Smuzhiyun {
2695*4882a593Smuzhiyun 	u32 cfg_flags;
2696*4882a593Smuzhiyun 	unsigned long flags;
2697*4882a593Smuzhiyun 
2698*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2699*4882a593Smuzhiyun 	cfg_flags = bnad->cfg_flags;
2700*4882a593Smuzhiyun 	if (bnad->cfg_flags & BNAD_CF_MSIX)
2701*4882a593Smuzhiyun 		bnad->cfg_flags &= ~BNAD_CF_MSIX;
2702*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2703*4882a593Smuzhiyun 
2704*4882a593Smuzhiyun 	if (cfg_flags & BNAD_CF_MSIX) {
2705*4882a593Smuzhiyun 		pci_disable_msix(bnad->pcidev);
2706*4882a593Smuzhiyun 		kfree(bnad->msix_table);
2707*4882a593Smuzhiyun 		bnad->msix_table = NULL;
2708*4882a593Smuzhiyun 	}
2709*4882a593Smuzhiyun }
2710*4882a593Smuzhiyun 
2711*4882a593Smuzhiyun /* Netdev entry points */
2712*4882a593Smuzhiyun static int
2713*4882a593Smuzhiyun bnad_open(struct net_device *netdev)
2714*4882a593Smuzhiyun {
2715*4882a593Smuzhiyun 	int err;
2716*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
2717*4882a593Smuzhiyun 	struct bna_pause_config pause_config;
2718*4882a593Smuzhiyun 	unsigned long flags;
2719*4882a593Smuzhiyun 
2720*4882a593Smuzhiyun 	mutex_lock(&bnad->conf_mutex);
2721*4882a593Smuzhiyun 
2722*4882a593Smuzhiyun 	/* Tx */
2723*4882a593Smuzhiyun 	err = bnad_setup_tx(bnad, 0);
2724*4882a593Smuzhiyun 	if (err)
2725*4882a593Smuzhiyun 		goto err_return;
2726*4882a593Smuzhiyun 
2727*4882a593Smuzhiyun 	/* Rx */
2728*4882a593Smuzhiyun 	err = bnad_setup_rx(bnad, 0);
2729*4882a593Smuzhiyun 	if (err)
2730*4882a593Smuzhiyun 		goto cleanup_tx;
2731*4882a593Smuzhiyun 
2732*4882a593Smuzhiyun 	/* Port */
2733*4882a593Smuzhiyun 	pause_config.tx_pause = 0;
2734*4882a593Smuzhiyun 	pause_config.rx_pause = 0;
2735*4882a593Smuzhiyun 
2736*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2737*4882a593Smuzhiyun 	bna_enet_mtu_set(&bnad->bna.enet,
2738*4882a593Smuzhiyun 			 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2739*4882a593Smuzhiyun 	bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2740*4882a593Smuzhiyun 	bna_enet_enable(&bnad->bna.enet);
2741*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2742*4882a593Smuzhiyun 
2743*4882a593Smuzhiyun 	/* Enable broadcast */
2744*4882a593Smuzhiyun 	bnad_enable_default_bcast(bnad);
2745*4882a593Smuzhiyun 
2746*4882a593Smuzhiyun 	/* Restore VLANs, if any */
2747*4882a593Smuzhiyun 	bnad_restore_vlans(bnad, 0);
2748*4882a593Smuzhiyun 
2749*4882a593Smuzhiyun 	/* Set the UCAST address */
2750*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2751*4882a593Smuzhiyun 	bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2752*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2753*4882a593Smuzhiyun 
2754*4882a593Smuzhiyun 	/* Start the stats timer */
2755*4882a593Smuzhiyun 	bnad_stats_timer_start(bnad);
2756*4882a593Smuzhiyun 
2757*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
2758*4882a593Smuzhiyun 
2759*4882a593Smuzhiyun 	return 0;
2760*4882a593Smuzhiyun 
2761*4882a593Smuzhiyun cleanup_tx:
2762*4882a593Smuzhiyun 	bnad_destroy_tx(bnad, 0);
2763*4882a593Smuzhiyun 
2764*4882a593Smuzhiyun err_return:
2765*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
2766*4882a593Smuzhiyun 	return err;
2767*4882a593Smuzhiyun }
2768*4882a593Smuzhiyun 
2769*4882a593Smuzhiyun static int
2770*4882a593Smuzhiyun bnad_stop(struct net_device *netdev)
2771*4882a593Smuzhiyun {
2772*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
2773*4882a593Smuzhiyun 	unsigned long flags;
2774*4882a593Smuzhiyun 
2775*4882a593Smuzhiyun 	mutex_lock(&bnad->conf_mutex);
2776*4882a593Smuzhiyun 
2777*4882a593Smuzhiyun 	/* Stop the stats timer */
2778*4882a593Smuzhiyun 	bnad_stats_timer_stop(bnad);
2779*4882a593Smuzhiyun 
2780*4882a593Smuzhiyun 	init_completion(&bnad->bnad_completions.enet_comp);
2781*4882a593Smuzhiyun 
2782*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
2783*4882a593Smuzhiyun 	bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2784*4882a593Smuzhiyun 			bnad_cb_enet_disabled);
2785*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
2786*4882a593Smuzhiyun 
2787*4882a593Smuzhiyun 	wait_for_completion(&bnad->bnad_completions.enet_comp);
2788*4882a593Smuzhiyun 
2789*4882a593Smuzhiyun 	bnad_destroy_tx(bnad, 0);
2790*4882a593Smuzhiyun 	bnad_destroy_rx(bnad, 0);
2791*4882a593Smuzhiyun 
2792*4882a593Smuzhiyun 	/* Synchronize mailbox IRQ */
2793*4882a593Smuzhiyun 	bnad_mbox_irq_sync(bnad);
2794*4882a593Smuzhiyun 
2795*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
2796*4882a593Smuzhiyun 
2797*4882a593Smuzhiyun 	return 0;
2798*4882a593Smuzhiyun }
2799*4882a593Smuzhiyun 
2800*4882a593Smuzhiyun /* TX */
2801*4882a593Smuzhiyun /* Returns 0 for success */
2802*4882a593Smuzhiyun static int
2803*4882a593Smuzhiyun bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2804*4882a593Smuzhiyun 		    struct sk_buff *skb, struct bna_txq_entry *txqent)
2805*4882a593Smuzhiyun {
2806*4882a593Smuzhiyun 	u16 flags = 0;
2807*4882a593Smuzhiyun 	u32 gso_size;
2808*4882a593Smuzhiyun 	u16 vlan_tag = 0;
2809*4882a593Smuzhiyun 
2810*4882a593Smuzhiyun 	if (skb_vlan_tag_present(skb)) {
2811*4882a593Smuzhiyun 		vlan_tag = (u16)skb_vlan_tag_get(skb);
2812*4882a593Smuzhiyun 		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2813*4882a593Smuzhiyun 	}
2814*4882a593Smuzhiyun 	if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2815*4882a593Smuzhiyun 		vlan_tag = ((tcb->priority & 0x7) << VLAN_PRIO_SHIFT)
2816*4882a593Smuzhiyun 				| (vlan_tag & 0x1fff);
2817*4882a593Smuzhiyun 		flags |= (BNA_TXQ_WI_CF_INS_PRIO | BNA_TXQ_WI_CF_INS_VLAN);
2818*4882a593Smuzhiyun 	}
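	/* e.g. priority 5 on VLAN 100: (5 << 13) | 100 = 0xa064 */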
2819*4882a593Smuzhiyun 	txqent->hdr.wi.vlan_tag = htons(vlan_tag);
2820*4882a593Smuzhiyun 
2821*4882a593Smuzhiyun 	if (skb_is_gso(skb)) {
2822*4882a593Smuzhiyun 		gso_size = skb_shinfo(skb)->gso_size;
2823*4882a593Smuzhiyun 		if (unlikely(gso_size > bnad->netdev->mtu)) {
2824*4882a593Smuzhiyun 			BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2825*4882a593Smuzhiyun 			return -EINVAL;
2826*4882a593Smuzhiyun 		}
2827*4882a593Smuzhiyun 		if (unlikely((gso_size + skb_transport_offset(skb) +
2828*4882a593Smuzhiyun 			      tcp_hdrlen(skb)) >= skb->len)) {
2829*4882a593Smuzhiyun 			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND);
2830*4882a593Smuzhiyun 			txqent->hdr.wi.lso_mss = 0;
2831*4882a593Smuzhiyun 			BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2832*4882a593Smuzhiyun 		} else {
2833*4882a593Smuzhiyun 			txqent->hdr.wi.opcode = htons(BNA_TXQ_WI_SEND_LSO);
2834*4882a593Smuzhiyun 			txqent->hdr.wi.lso_mss = htons(gso_size);
2835*4882a593Smuzhiyun 		}
2836*4882a593Smuzhiyun 
2837*4882a593Smuzhiyun 		if (bnad_tso_prepare(bnad, skb)) {
2838*4882a593Smuzhiyun 			BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2839*4882a593Smuzhiyun 			return -EINVAL;
2840*4882a593Smuzhiyun 		}
2841*4882a593Smuzhiyun 
2842*4882a593Smuzhiyun 		flags |= (BNA_TXQ_WI_CF_IP_CKSUM | BNA_TXQ_WI_CF_TCP_CKSUM);
2843*4882a593Smuzhiyun 		txqent->hdr.wi.l4_hdr_size_n_offset =
2844*4882a593Smuzhiyun 			htons(BNA_TXQ_WI_L4_HDR_N_OFFSET(
2845*4882a593Smuzhiyun 			tcp_hdrlen(skb) >> 2, skb_transport_offset(skb)));
2846*4882a593Smuzhiyun 	} else  {
2847*4882a593Smuzhiyun 		txqent->hdr.wi.opcode =	htons(BNA_TXQ_WI_SEND);
2848*4882a593Smuzhiyun 		txqent->hdr.wi.lso_mss = 0;
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun 		if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2851*4882a593Smuzhiyun 			BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2852*4882a593Smuzhiyun 			return -EINVAL;
2853*4882a593Smuzhiyun 		}
2854*4882a593Smuzhiyun 
2855*4882a593Smuzhiyun 		if (skb->ip_summed == CHECKSUM_PARTIAL) {
2856*4882a593Smuzhiyun 			__be16 net_proto = vlan_get_protocol(skb);
2857*4882a593Smuzhiyun 			u8 proto = 0;
2858*4882a593Smuzhiyun 
2859*4882a593Smuzhiyun 			if (net_proto == htons(ETH_P_IP))
2860*4882a593Smuzhiyun 				proto = ip_hdr(skb)->protocol;
2861*4882a593Smuzhiyun #ifdef NETIF_F_IPV6_CSUM
2862*4882a593Smuzhiyun 			else if (net_proto == htons(ETH_P_IPV6)) {
2863*4882a593Smuzhiyun 				/* nexthdr may not be TCP immediately. */
2864*4882a593Smuzhiyun 				proto = ipv6_hdr(skb)->nexthdr;
2865*4882a593Smuzhiyun 			}
2866*4882a593Smuzhiyun #endif
2867*4882a593Smuzhiyun 			if (proto == IPPROTO_TCP) {
2868*4882a593Smuzhiyun 				flags |= BNA_TXQ_WI_CF_TCP_CKSUM;
2869*4882a593Smuzhiyun 				txqent->hdr.wi.l4_hdr_size_n_offset =
2870*4882a593Smuzhiyun 					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2871*4882a593Smuzhiyun 					      (0, skb_transport_offset(skb)));
2872*4882a593Smuzhiyun 
2873*4882a593Smuzhiyun 				BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2874*4882a593Smuzhiyun 
2875*4882a593Smuzhiyun 				if (unlikely(skb_headlen(skb) <
2876*4882a593Smuzhiyun 					    skb_transport_offset(skb) +
2877*4882a593Smuzhiyun 				    tcp_hdrlen(skb))) {
2878*4882a593Smuzhiyun 					BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2879*4882a593Smuzhiyun 					return -EINVAL;
2880*4882a593Smuzhiyun 				}
2881*4882a593Smuzhiyun 			} else if (proto == IPPROTO_UDP) {
2882*4882a593Smuzhiyun 				flags |= BNA_TXQ_WI_CF_UDP_CKSUM;
2883*4882a593Smuzhiyun 				txqent->hdr.wi.l4_hdr_size_n_offset =
2884*4882a593Smuzhiyun 					htons(BNA_TXQ_WI_L4_HDR_N_OFFSET
2885*4882a593Smuzhiyun 					      (0, skb_transport_offset(skb)));
2886*4882a593Smuzhiyun 
2887*4882a593Smuzhiyun 				BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2888*4882a593Smuzhiyun 				if (unlikely(skb_headlen(skb) <
2889*4882a593Smuzhiyun 					    skb_transport_offset(skb) +
2890*4882a593Smuzhiyun 				    sizeof(struct udphdr))) {
2891*4882a593Smuzhiyun 					BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2892*4882a593Smuzhiyun 					return -EINVAL;
2893*4882a593Smuzhiyun 				}
2894*4882a593Smuzhiyun 			} else {
2895*4882a593Smuzhiyun 
2896*4882a593Smuzhiyun 				BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2897*4882a593Smuzhiyun 				return -EINVAL;
2898*4882a593Smuzhiyun 			}
2899*4882a593Smuzhiyun 		} else {
2900*4882a593Smuzhiyun 			txqent->hdr.wi.l4_hdr_size_n_offset = 0;
		}
2901*4882a593Smuzhiyun 	}
2902*4882a593Smuzhiyun 
2903*4882a593Smuzhiyun 	txqent->hdr.wi.flags = htons(flags);
2904*4882a593Smuzhiyun 	txqent->hdr.wi.frame_length = htonl(skb->len);
2905*4882a593Smuzhiyun 
2906*4882a593Smuzhiyun 	return 0;
2907*4882a593Smuzhiyun }
2908*4882a593Smuzhiyun 
2909*4882a593Smuzhiyun /*
2910*4882a593Smuzhiyun  * bnad_start_xmit : Netdev entry point for Transmit
2911*4882a593Smuzhiyun  *		     Called under lock held by net_device
2912*4882a593Smuzhiyun  */
2913*4882a593Smuzhiyun static netdev_tx_t
2914*4882a593Smuzhiyun bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2915*4882a593Smuzhiyun {
2916*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
2917*4882a593Smuzhiyun 	u32 txq_id = 0;
2918*4882a593Smuzhiyun 	struct bna_tcb *tcb = NULL;
2919*4882a593Smuzhiyun 	struct bnad_tx_unmap *unmap_q, *unmap, *head_unmap;
2920*4882a593Smuzhiyun 	u32		prod, q_depth, vect_id;
2921*4882a593Smuzhiyun 	u32		wis, vectors, len;
2922*4882a593Smuzhiyun 	int		i;
2923*4882a593Smuzhiyun 	dma_addr_t		dma_addr;
2924*4882a593Smuzhiyun 	struct bna_txq_entry *txqent;
2925*4882a593Smuzhiyun 
2926*4882a593Smuzhiyun 	len = skb_headlen(skb);
2927*4882a593Smuzhiyun 
2928*4882a593Smuzhiyun 	/* Sanity checks for the skb */
2929*4882a593Smuzhiyun 
2930*4882a593Smuzhiyun 	if (unlikely(skb->len <= ETH_HLEN)) {
2931*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
2932*4882a593Smuzhiyun 		BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2933*4882a593Smuzhiyun 		return NETDEV_TX_OK;
2934*4882a593Smuzhiyun 	}
2935*4882a593Smuzhiyun 	if (unlikely(len > BFI_TX_MAX_DATA_PER_VECTOR)) {
2936*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
2937*4882a593Smuzhiyun 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);	/* counter shared with the len == 0 case */
2938*4882a593Smuzhiyun 		return NETDEV_TX_OK;
2939*4882a593Smuzhiyun 	}
2940*4882a593Smuzhiyun 	if (unlikely(len == 0)) {
2941*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
2942*4882a593Smuzhiyun 		BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2943*4882a593Smuzhiyun 		return NETDEV_TX_OK;
2944*4882a593Smuzhiyun 	}
2945*4882a593Smuzhiyun 
2946*4882a593Smuzhiyun 	tcb = bnad->tx_info[0].tcb[txq_id];
2947*4882a593Smuzhiyun 
2948*4882a593Smuzhiyun 	/*
2949*4882a593Smuzhiyun 	 * Takes care of the Tx that is scheduled between clearing the flag
2950*4882a593Smuzhiyun 	 * and the netif_tx_stop_all_queues() call.
2951*4882a593Smuzhiyun 	 */
2952*4882a593Smuzhiyun 	if (unlikely(!tcb || !test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2953*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
2954*4882a593Smuzhiyun 		BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2955*4882a593Smuzhiyun 		return NETDEV_TX_OK;
2956*4882a593Smuzhiyun 	}
2957*4882a593Smuzhiyun 
2958*4882a593Smuzhiyun 	q_depth = tcb->q_depth;
2959*4882a593Smuzhiyun 	prod = tcb->producer_index;
2960*4882a593Smuzhiyun 	unmap_q = tcb->unmap_q;
2961*4882a593Smuzhiyun 
2962*4882a593Smuzhiyun 	vectors = 1 + skb_shinfo(skb)->nr_frags;
2963*4882a593Smuzhiyun 	wis = BNA_TXQ_WI_NEEDED(vectors);	/* 4 vectors per work item */
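	/* e.g. an skb with 6 frags needs 7 vectors, i.e. 2 work items */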
2964*4882a593Smuzhiyun 
2965*4882a593Smuzhiyun 	if (unlikely(vectors > BFI_TX_MAX_VECTORS_PER_PKT)) {
2966*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
2967*4882a593Smuzhiyun 		BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2968*4882a593Smuzhiyun 		return NETDEV_TX_OK;
2969*4882a593Smuzhiyun 	}
2970*4882a593Smuzhiyun 
2971*4882a593Smuzhiyun 	/* Check for available TxQ resources */
2972*4882a593Smuzhiyun 	if (unlikely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2973*4882a593Smuzhiyun 		if ((*tcb->hw_consumer_index != tcb->consumer_index) &&
2974*4882a593Smuzhiyun 		    !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2975*4882a593Smuzhiyun 			u32 sent;
2976*4882a593Smuzhiyun 			sent = bnad_txcmpl_process(bnad, tcb);
2977*4882a593Smuzhiyun 			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2978*4882a593Smuzhiyun 				bna_ib_ack(tcb->i_dbell, sent);
2979*4882a593Smuzhiyun 			smp_mb__before_atomic();
2980*4882a593Smuzhiyun 			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2981*4882a593Smuzhiyun 		} else {
2982*4882a593Smuzhiyun 			netif_stop_queue(netdev);
2983*4882a593Smuzhiyun 			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2984*4882a593Smuzhiyun 		}
2985*4882a593Smuzhiyun 
2986*4882a593Smuzhiyun 		smp_mb();
2987*4882a593Smuzhiyun 		/*
2988*4882a593Smuzhiyun 		 * Check again to deal with race condition between
2989*4882a593Smuzhiyun 		 * netif_stop_queue here, and netif_wake_queue in
2990*4882a593Smuzhiyun 		 * interrupt handler which is not inside netif tx lock.
2991*4882a593Smuzhiyun 		 */
2992*4882a593Smuzhiyun 		if (likely(wis > BNA_QE_FREE_CNT(tcb, q_depth))) {
2993*4882a593Smuzhiyun 			BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2994*4882a593Smuzhiyun 			return NETDEV_TX_BUSY;
2995*4882a593Smuzhiyun 		} else {
2996*4882a593Smuzhiyun 			netif_wake_queue(netdev);
2997*4882a593Smuzhiyun 			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
2998*4882a593Smuzhiyun 		}
2999*4882a593Smuzhiyun 	}
3000*4882a593Smuzhiyun 
3001*4882a593Smuzhiyun 	txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3002*4882a593Smuzhiyun 	head_unmap = &unmap_q[prod];
3003*4882a593Smuzhiyun 
3004*4882a593Smuzhiyun 	/* Program the opcode, flags, frame_len, num_vectors in WI */
3005*4882a593Smuzhiyun 	if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3006*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
3007*4882a593Smuzhiyun 		return NETDEV_TX_OK;
3008*4882a593Smuzhiyun 	}
3009*4882a593Smuzhiyun 	txqent->hdr.wi.reserved = 0;
3010*4882a593Smuzhiyun 	txqent->hdr.wi.num_vectors = vectors;
3011*4882a593Smuzhiyun 
3012*4882a593Smuzhiyun 	head_unmap->skb = skb;
3013*4882a593Smuzhiyun 	head_unmap->nvecs = 0;
3014*4882a593Smuzhiyun 
3015*4882a593Smuzhiyun 	/* Program the vectors */
3016*4882a593Smuzhiyun 	unmap = head_unmap;
3017*4882a593Smuzhiyun 	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3018*4882a593Smuzhiyun 				  len, DMA_TO_DEVICE);
3019*4882a593Smuzhiyun 	if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3020*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
3021*4882a593Smuzhiyun 		BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3022*4882a593Smuzhiyun 		return NETDEV_TX_OK;
3023*4882a593Smuzhiyun 	}
3024*4882a593Smuzhiyun 	BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[0].host_addr);
3025*4882a593Smuzhiyun 	txqent->vector[0].length = htons(len);
3026*4882a593Smuzhiyun 	dma_unmap_addr_set(&unmap->vectors[0], dma_addr, dma_addr);
3027*4882a593Smuzhiyun 	head_unmap->nvecs++;
3028*4882a593Smuzhiyun 
3029*4882a593Smuzhiyun 	for (i = 0, vect_id = 0; i < vectors - 1; i++) {
3030*4882a593Smuzhiyun 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3031*4882a593Smuzhiyun 		u32		size = skb_frag_size(frag);
3032*4882a593Smuzhiyun 
3033*4882a593Smuzhiyun 		if (unlikely(size == 0)) {
3034*4882a593Smuzhiyun 			/* Undo the changes starting at tcb->producer_index */
3035*4882a593Smuzhiyun 			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3036*4882a593Smuzhiyun 				tcb->producer_index);
3037*4882a593Smuzhiyun 			dev_kfree_skb_any(skb);
3038*4882a593Smuzhiyun 			BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3039*4882a593Smuzhiyun 			return NETDEV_TX_OK;
3040*4882a593Smuzhiyun 		}
3041*4882a593Smuzhiyun 
3042*4882a593Smuzhiyun 		len += size;
3043*4882a593Smuzhiyun 
3044*4882a593Smuzhiyun 		vect_id++;
3045*4882a593Smuzhiyun 		if (vect_id == BFI_TX_MAX_VECTORS_PER_WI) {
3046*4882a593Smuzhiyun 			vect_id = 0;
3047*4882a593Smuzhiyun 			BNA_QE_INDX_INC(prod, q_depth);
3048*4882a593Smuzhiyun 			txqent = &((struct bna_txq_entry *)tcb->sw_q)[prod];
3049*4882a593Smuzhiyun 			txqent->hdr.wi_ext.opcode = htons(BNA_TXQ_WI_EXTENSION);
3050*4882a593Smuzhiyun 			unmap = &unmap_q[prod];
3051*4882a593Smuzhiyun 		}
3052*4882a593Smuzhiyun 
3053*4882a593Smuzhiyun 		dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3054*4882a593Smuzhiyun 					    0, size, DMA_TO_DEVICE);
3055*4882a593Smuzhiyun 		if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3056*4882a593Smuzhiyun 			/* Undo the changes starting at tcb->producer_index */
3057*4882a593Smuzhiyun 			bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3058*4882a593Smuzhiyun 					   tcb->producer_index);
3059*4882a593Smuzhiyun 			dev_kfree_skb_any(skb);
3060*4882a593Smuzhiyun 			BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3061*4882a593Smuzhiyun 			return NETDEV_TX_OK;
3062*4882a593Smuzhiyun 		}
3063*4882a593Smuzhiyun 
3064*4882a593Smuzhiyun 		dma_unmap_len_set(&unmap->vectors[vect_id], dma_len, size);
3065*4882a593Smuzhiyun 		BNA_SET_DMA_ADDR(dma_addr, &txqent->vector[vect_id].host_addr);
3066*4882a593Smuzhiyun 		txqent->vector[vect_id].length = htons(size);
3067*4882a593Smuzhiyun 		dma_unmap_addr_set(&unmap->vectors[vect_id], dma_addr,
3068*4882a593Smuzhiyun 				   dma_addr);
3069*4882a593Smuzhiyun 		head_unmap->nvecs++;
3070*4882a593Smuzhiyun 	}
3071*4882a593Smuzhiyun 
3072*4882a593Smuzhiyun 	if (unlikely(len != skb->len)) {
3073*4882a593Smuzhiyun 		/* Undo the changes starting at tcb->producer_index */
3074*4882a593Smuzhiyun 		bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3075*4882a593Smuzhiyun 		dev_kfree_skb_any(skb);
3076*4882a593Smuzhiyun 		BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3077*4882a593Smuzhiyun 		return NETDEV_TX_OK;
3078*4882a593Smuzhiyun 	}
3079*4882a593Smuzhiyun 
3080*4882a593Smuzhiyun 	BNA_QE_INDX_INC(prod, q_depth);
3081*4882a593Smuzhiyun 	tcb->producer_index = prod;
3082*4882a593Smuzhiyun 
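	/*
	 * Order the work-item and producer index writes ahead of the
	 * doorbell ring below.
	 */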
3083*4882a593Smuzhiyun 	wmb();
3084*4882a593Smuzhiyun 
3085*4882a593Smuzhiyun 	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
3086*4882a593Smuzhiyun 		return NETDEV_TX_OK;
3087*4882a593Smuzhiyun 
3088*4882a593Smuzhiyun 	skb_tx_timestamp(skb);
3089*4882a593Smuzhiyun 
3090*4882a593Smuzhiyun 	bna_txq_prod_indx_doorbell(tcb);
3091*4882a593Smuzhiyun 
3092*4882a593Smuzhiyun 	return NETDEV_TX_OK;
3093*4882a593Smuzhiyun }
3094*4882a593Smuzhiyun 
3095*4882a593Smuzhiyun /*
3096*4882a593Smuzhiyun  * Used spin_lock to synchronize reading of stats structures, which
3097*4882a593Smuzhiyun  * is written by BNA under the same lock.
3098*4882a593Smuzhiyun  */
3099*4882a593Smuzhiyun static void
3100*4882a593Smuzhiyun bnad_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
3101*4882a593Smuzhiyun {
3102*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
3103*4882a593Smuzhiyun 	unsigned long flags;
3104*4882a593Smuzhiyun 
3105*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3106*4882a593Smuzhiyun 
3107*4882a593Smuzhiyun 	bnad_netdev_qstats_fill(bnad, stats);
3108*4882a593Smuzhiyun 	bnad_netdev_hwstats_fill(bnad, stats);
3109*4882a593Smuzhiyun 
3110*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3111*4882a593Smuzhiyun }
3112*4882a593Smuzhiyun 
3113*4882a593Smuzhiyun static void
3114*4882a593Smuzhiyun bnad_set_rx_ucast_fltr(struct bnad *bnad)
3115*4882a593Smuzhiyun {
3116*4882a593Smuzhiyun 	struct net_device *netdev = bnad->netdev;
3117*4882a593Smuzhiyun 	int uc_count = netdev_uc_count(netdev);
3118*4882a593Smuzhiyun 	enum bna_cb_status ret;
3119*4882a593Smuzhiyun 	u8 *mac_list;
3120*4882a593Smuzhiyun 	struct netdev_hw_addr *ha;
3121*4882a593Smuzhiyun 	int entry;
3122*4882a593Smuzhiyun 
3123*4882a593Smuzhiyun 	if (netdev_uc_empty(bnad->netdev)) {
3124*4882a593Smuzhiyun 		bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3125*4882a593Smuzhiyun 		return;
3126*4882a593Smuzhiyun 	}
3127*4882a593Smuzhiyun 
3128*4882a593Smuzhiyun 	if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3129*4882a593Smuzhiyun 		goto mode_default;
3130*4882a593Smuzhiyun 
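	/* GFP_ATOMIC: called under bnad->bna_lock with IRQs disabled */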
3131*4882a593Smuzhiyun 	mac_list = kcalloc(uc_count, ETH_ALEN, GFP_ATOMIC);
3132*4882a593Smuzhiyun 	if (mac_list == NULL)
3133*4882a593Smuzhiyun 		goto mode_default;
3134*4882a593Smuzhiyun 
3135*4882a593Smuzhiyun 	entry = 0;
3136*4882a593Smuzhiyun 	netdev_for_each_uc_addr(ha, netdev) {
3137*4882a593Smuzhiyun 		ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
3138*4882a593Smuzhiyun 		entry++;
3139*4882a593Smuzhiyun 	}
3140*4882a593Smuzhiyun 
3141*4882a593Smuzhiyun 	ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3142*4882a593Smuzhiyun 	kfree(mac_list);
3143*4882a593Smuzhiyun 
3144*4882a593Smuzhiyun 	if (ret != BNA_CB_SUCCESS)
3145*4882a593Smuzhiyun 		goto mode_default;
3146*4882a593Smuzhiyun 
3147*4882a593Smuzhiyun 	return;
3148*4882a593Smuzhiyun 
3149*4882a593Smuzhiyun 	/* ucast packets not in UCAM are routed to default function */
3150*4882a593Smuzhiyun mode_default:
3151*4882a593Smuzhiyun 	bnad->cfg_flags |= BNAD_CF_DEFAULT;
3152*4882a593Smuzhiyun 	bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3153*4882a593Smuzhiyun }
3154*4882a593Smuzhiyun 
3155*4882a593Smuzhiyun static void
3156*4882a593Smuzhiyun bnad_set_rx_mcast_fltr(struct bnad *bnad)
3157*4882a593Smuzhiyun {
3158*4882a593Smuzhiyun 	struct net_device *netdev = bnad->netdev;
3159*4882a593Smuzhiyun 	int mc_count = netdev_mc_count(netdev);
3160*4882a593Smuzhiyun 	enum bna_cb_status ret;
3161*4882a593Smuzhiyun 	u8 *mac_list;
3162*4882a593Smuzhiyun 
3163*4882a593Smuzhiyun 	if (netdev->flags & IFF_ALLMULTI)
3164*4882a593Smuzhiyun 		goto mode_allmulti;
3165*4882a593Smuzhiyun 
3166*4882a593Smuzhiyun 	if (netdev_mc_empty(netdev))
3167*4882a593Smuzhiyun 		return;
3168*4882a593Smuzhiyun 
3169*4882a593Smuzhiyun 	if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3170*4882a593Smuzhiyun 		goto mode_allmulti;
3171*4882a593Smuzhiyun 
3172*4882a593Smuzhiyun 	mac_list = kcalloc(mc_count + 1, ETH_ALEN, GFP_ATOMIC);
3173*4882a593Smuzhiyun 
3174*4882a593Smuzhiyun 	if (mac_list == NULL)
3175*4882a593Smuzhiyun 		goto mode_allmulti;
3176*4882a593Smuzhiyun 
3177*4882a593Smuzhiyun 	ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
3178*4882a593Smuzhiyun 
3179*4882a593Smuzhiyun 	/* copy rest of the MCAST addresses */
3180*4882a593Smuzhiyun 	bnad_netdev_mc_list_get(netdev, mac_list);
3181*4882a593Smuzhiyun 	ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3182*4882a593Smuzhiyun 	kfree(mac_list);
3183*4882a593Smuzhiyun 
3184*4882a593Smuzhiyun 	if (ret != BNA_CB_SUCCESS)
3185*4882a593Smuzhiyun 		goto mode_allmulti;
3186*4882a593Smuzhiyun 
3187*4882a593Smuzhiyun 	return;
3188*4882a593Smuzhiyun 
3189*4882a593Smuzhiyun mode_allmulti:
3190*4882a593Smuzhiyun 	bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3191*4882a593Smuzhiyun 	bna_rx_mcast_delall(bnad->rx_info[0].rx);
3192*4882a593Smuzhiyun }
3193*4882a593Smuzhiyun 
3194*4882a593Smuzhiyun void
3195*4882a593Smuzhiyun bnad_set_rx_mode(struct net_device *netdev)
3196*4882a593Smuzhiyun {
3197*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
3198*4882a593Smuzhiyun 	enum bna_rxmode new_mode, mode_mask;
3199*4882a593Smuzhiyun 	unsigned long flags;
3200*4882a593Smuzhiyun 
3201*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3202*4882a593Smuzhiyun 
3203*4882a593Smuzhiyun 	if (bnad->rx_info[0].rx == NULL) {
3204*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
3205*4882a593Smuzhiyun 		return;
3206*4882a593Smuzhiyun 	}
3207*4882a593Smuzhiyun 
3208*4882a593Smuzhiyun 	/* clear bnad flags to update it with new settings */
3209*4882a593Smuzhiyun 	bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3210*4882a593Smuzhiyun 			BNAD_CF_ALLMULTI);
3211*4882a593Smuzhiyun 
3212*4882a593Smuzhiyun 	new_mode = 0;
3213*4882a593Smuzhiyun 	if (netdev->flags & IFF_PROMISC) {
3214*4882a593Smuzhiyun 		new_mode |= BNAD_RXMODE_PROMISC_DEFAULT;
3215*4882a593Smuzhiyun 		bnad->cfg_flags |= BNAD_CF_PROMISC;
3216*4882a593Smuzhiyun 	} else {
3217*4882a593Smuzhiyun 		bnad_set_rx_mcast_fltr(bnad);
3218*4882a593Smuzhiyun 
3219*4882a593Smuzhiyun 		if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3220*4882a593Smuzhiyun 			new_mode |= BNA_RXMODE_ALLMULTI;
3221*4882a593Smuzhiyun 
3222*4882a593Smuzhiyun 		bnad_set_rx_ucast_fltr(bnad);
3223*4882a593Smuzhiyun 
3224*4882a593Smuzhiyun 		if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3225*4882a593Smuzhiyun 			new_mode |= BNA_RXMODE_DEFAULT;
3226*4882a593Smuzhiyun 	}
3227*4882a593Smuzhiyun 
3228*4882a593Smuzhiyun 	mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
3229*4882a593Smuzhiyun 			BNA_RXMODE_ALLMULTI;
3230*4882a593Smuzhiyun 	bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3231*4882a593Smuzhiyun 
3232*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3233*4882a593Smuzhiyun }
3234*4882a593Smuzhiyun 
3235*4882a593Smuzhiyun /*
3236*4882a593Smuzhiyun  * bna_lock is used to sync writes to netdev->addr
3237*4882a593Smuzhiyun  * conf_lock cannot be used since this call may be made
3238*4882a593Smuzhiyun  * in a non-blocking context.
3239*4882a593Smuzhiyun  */
3240*4882a593Smuzhiyun static int
3241*4882a593Smuzhiyun bnad_set_mac_address(struct net_device *netdev, void *addr)
3242*4882a593Smuzhiyun {
3243*4882a593Smuzhiyun 	int err;
3244*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
3245*4882a593Smuzhiyun 	struct sockaddr *sa = (struct sockaddr *)addr;
3246*4882a593Smuzhiyun 	unsigned long flags;
3247*4882a593Smuzhiyun 
3248*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3249*4882a593Smuzhiyun 
3250*4882a593Smuzhiyun 	err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3251*4882a593Smuzhiyun 	if (!err)
3252*4882a593Smuzhiyun 		ether_addr_copy(netdev->dev_addr, sa->sa_data);
3253*4882a593Smuzhiyun 
3254*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3255*4882a593Smuzhiyun 
3256*4882a593Smuzhiyun 	return err;
3257*4882a593Smuzhiyun }
3258*4882a593Smuzhiyun 
3259*4882a593Smuzhiyun static int
3260*4882a593Smuzhiyun bnad_mtu_set(struct bnad *bnad, int frame_size)
3261*4882a593Smuzhiyun {
3262*4882a593Smuzhiyun 	unsigned long flags;
3263*4882a593Smuzhiyun 
3264*4882a593Smuzhiyun 	init_completion(&bnad->bnad_completions.mtu_comp);
3265*4882a593Smuzhiyun 
3266*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3267*4882a593Smuzhiyun 	bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3268*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3269*4882a593Smuzhiyun 
3270*4882a593Smuzhiyun 	wait_for_completion(&bnad->bnad_completions.mtu_comp);
3271*4882a593Smuzhiyun 
3272*4882a593Smuzhiyun 	return bnad->bnad_completions.mtu_comp_status;
3273*4882a593Smuzhiyun }
3274*4882a593Smuzhiyun 
3275*4882a593Smuzhiyun static int
3276*4882a593Smuzhiyun bnad_change_mtu(struct net_device *netdev, int new_mtu)
3277*4882a593Smuzhiyun {
3278*4882a593Smuzhiyun 	int err, mtu;
3279*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
3280*4882a593Smuzhiyun 	u32 frame, new_frame;
3281*4882a593Smuzhiyun 
3282*4882a593Smuzhiyun 	mutex_lock(&bnad->conf_mutex);
3283*4882a593Smuzhiyun 
3284*4882a593Smuzhiyun 	mtu = netdev->mtu;
3285*4882a593Smuzhiyun 	netdev->mtu = new_mtu;
3286*4882a593Smuzhiyun 
3287*4882a593Smuzhiyun 	frame = BNAD_FRAME_SIZE(mtu);
3288*4882a593Smuzhiyun 	new_frame = BNAD_FRAME_SIZE(new_mtu);
3289*4882a593Smuzhiyun 
3290*4882a593Smuzhiyun 	/* check if multi-buffer needs to be enabled */
3291*4882a593Smuzhiyun 	if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3292*4882a593Smuzhiyun 	    netif_running(bnad->netdev)) {
3293*4882a593Smuzhiyun 		/* only when the frame size crosses the 4K boundary */
3294*4882a593Smuzhiyun 		if ((frame <= 4096 && new_frame > 4096) ||
3295*4882a593Smuzhiyun 		    (frame > 4096 && new_frame <= 4096))
3296*4882a593Smuzhiyun 			bnad_reinit_rx(bnad);
3297*4882a593Smuzhiyun 	}
3298*4882a593Smuzhiyun 
3299*4882a593Smuzhiyun 	err = bnad_mtu_set(bnad, new_frame);
3300*4882a593Smuzhiyun 	if (err)
3301*4882a593Smuzhiyun 		err = -EBUSY;
3302*4882a593Smuzhiyun 
3303*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
3304*4882a593Smuzhiyun 	return err;
3305*4882a593Smuzhiyun }
3306*4882a593Smuzhiyun 
3307*4882a593Smuzhiyun static int
3308*4882a593Smuzhiyun bnad_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
3309*4882a593Smuzhiyun {
3310*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
3311*4882a593Smuzhiyun 	unsigned long flags;
3312*4882a593Smuzhiyun 
3313*4882a593Smuzhiyun 	if (!bnad->rx_info[0].rx)
3314*4882a593Smuzhiyun 		return 0;
3315*4882a593Smuzhiyun 
3316*4882a593Smuzhiyun 	mutex_lock(&bnad->conf_mutex);
3317*4882a593Smuzhiyun 
3318*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3319*4882a593Smuzhiyun 	bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3320*4882a593Smuzhiyun 	set_bit(vid, bnad->active_vlans);
3321*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3322*4882a593Smuzhiyun 
3323*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
3324*4882a593Smuzhiyun 
3325*4882a593Smuzhiyun 	return 0;
3326*4882a593Smuzhiyun }
3327*4882a593Smuzhiyun 
3328*4882a593Smuzhiyun static int
3329*4882a593Smuzhiyun bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
3330*4882a593Smuzhiyun {
3331*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
3332*4882a593Smuzhiyun 	unsigned long flags;
3333*4882a593Smuzhiyun 
3334*4882a593Smuzhiyun 	if (!bnad->rx_info[0].rx)
3335*4882a593Smuzhiyun 		return 0;
3336*4882a593Smuzhiyun 
3337*4882a593Smuzhiyun 	mutex_lock(&bnad->conf_mutex);
3338*4882a593Smuzhiyun 
3339*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3340*4882a593Smuzhiyun 	clear_bit(vid, bnad->active_vlans);
3341*4882a593Smuzhiyun 	bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3342*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3343*4882a593Smuzhiyun 
3344*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
3345*4882a593Smuzhiyun 
3346*4882a593Smuzhiyun 	return 0;
3347*4882a593Smuzhiyun }
3348*4882a593Smuzhiyun 
3349*4882a593Smuzhiyun static int bnad_set_features(struct net_device *dev, netdev_features_t features)
3350*4882a593Smuzhiyun {
3351*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(dev);
3352*4882a593Smuzhiyun 	netdev_features_t changed = features ^ dev->features;
3353*4882a593Smuzhiyun 
3354*4882a593Smuzhiyun 	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
3355*4882a593Smuzhiyun 		unsigned long flags;
3356*4882a593Smuzhiyun 
3357*4882a593Smuzhiyun 		spin_lock_irqsave(&bnad->bna_lock, flags);
3358*4882a593Smuzhiyun 
3359*4882a593Smuzhiyun 		if (features & NETIF_F_HW_VLAN_CTAG_RX)
3360*4882a593Smuzhiyun 			bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3361*4882a593Smuzhiyun 		else
3362*4882a593Smuzhiyun 			bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3363*4882a593Smuzhiyun 
3364*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
3365*4882a593Smuzhiyun 	}
3366*4882a593Smuzhiyun 
3367*4882a593Smuzhiyun 	return 0;
3368*4882a593Smuzhiyun }
3369*4882a593Smuzhiyun 
3370*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
3371*4882a593Smuzhiyun static void
3372*4882a593Smuzhiyun bnad_netpoll(struct net_device *netdev)
3373*4882a593Smuzhiyun {
3374*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
3375*4882a593Smuzhiyun 	struct bnad_rx_info *rx_info;
3376*4882a593Smuzhiyun 	struct bnad_rx_ctrl *rx_ctrl;
3377*4882a593Smuzhiyun 	u32 curr_mask;
3378*4882a593Smuzhiyun 	int i, j;
3379*4882a593Smuzhiyun 
3380*4882a593Smuzhiyun 	if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3381*4882a593Smuzhiyun 		bna_intx_disable(&bnad->bna, curr_mask);
3382*4882a593Smuzhiyun 		bnad_isr(bnad->pcidev->irq, netdev);
3383*4882a593Smuzhiyun 		bna_intx_enable(&bnad->bna, curr_mask);
3384*4882a593Smuzhiyun 	} else {
3385*4882a593Smuzhiyun 		/*
3386*4882a593Smuzhiyun 		 * Tx processing may happen in sending context, so no need
3387*4882a593Smuzhiyun 		 * to explicitly process completions here
3388*4882a593Smuzhiyun 		 */
3389*4882a593Smuzhiyun 
3390*4882a593Smuzhiyun 		/* Rx processing */
3391*4882a593Smuzhiyun 		for (i = 0; i < bnad->num_rx; i++) {
3392*4882a593Smuzhiyun 			rx_info = &bnad->rx_info[i];
3393*4882a593Smuzhiyun 			if (!rx_info->rx)
3394*4882a593Smuzhiyun 				continue;
3395*4882a593Smuzhiyun 			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3396*4882a593Smuzhiyun 				rx_ctrl = &rx_info->rx_ctrl[j];
3397*4882a593Smuzhiyun 				if (rx_ctrl->ccb)
3398*4882a593Smuzhiyun 					bnad_netif_rx_schedule_poll(bnad,
3399*4882a593Smuzhiyun 							    rx_ctrl->ccb);
3400*4882a593Smuzhiyun 			}
3401*4882a593Smuzhiyun 		}
3402*4882a593Smuzhiyun 	}
3403*4882a593Smuzhiyun }
3404*4882a593Smuzhiyun #endif
3405*4882a593Smuzhiyun 
3406*4882a593Smuzhiyun static const struct net_device_ops bnad_netdev_ops = {
3407*4882a593Smuzhiyun 	.ndo_open		= bnad_open,
3408*4882a593Smuzhiyun 	.ndo_stop		= bnad_stop,
3409*4882a593Smuzhiyun 	.ndo_start_xmit		= bnad_start_xmit,
3410*4882a593Smuzhiyun 	.ndo_get_stats64	= bnad_get_stats64,
3411*4882a593Smuzhiyun 	.ndo_set_rx_mode	= bnad_set_rx_mode,
3412*4882a593Smuzhiyun 	.ndo_validate_addr      = eth_validate_addr,
3413*4882a593Smuzhiyun 	.ndo_set_mac_address    = bnad_set_mac_address,
3414*4882a593Smuzhiyun 	.ndo_change_mtu		= bnad_change_mtu,
3415*4882a593Smuzhiyun 	.ndo_vlan_rx_add_vid    = bnad_vlan_rx_add_vid,
3416*4882a593Smuzhiyun 	.ndo_vlan_rx_kill_vid   = bnad_vlan_rx_kill_vid,
3417*4882a593Smuzhiyun 	.ndo_set_features	= bnad_set_features,
3418*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
3419*4882a593Smuzhiyun 	.ndo_poll_controller    = bnad_netpoll
3420*4882a593Smuzhiyun #endif
3421*4882a593Smuzhiyun };
3422*4882a593Smuzhiyun 
3423*4882a593Smuzhiyun static void
3424*4882a593Smuzhiyun bnad_netdev_init(struct bnad *bnad, bool using_dac)
3425*4882a593Smuzhiyun {
3426*4882a593Smuzhiyun 	struct net_device *netdev = bnad->netdev;
3427*4882a593Smuzhiyun 
3428*4882a593Smuzhiyun 	netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
3429*4882a593Smuzhiyun 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3430*4882a593Smuzhiyun 		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
3431*4882a593Smuzhiyun 		NETIF_F_HW_VLAN_CTAG_RX;
3432*4882a593Smuzhiyun 
3433*4882a593Smuzhiyun 	netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
3434*4882a593Smuzhiyun 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
3435*4882a593Smuzhiyun 		NETIF_F_TSO | NETIF_F_TSO6;
3436*4882a593Smuzhiyun 
3437*4882a593Smuzhiyun 	netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
3438*4882a593Smuzhiyun 
3439*4882a593Smuzhiyun 	if (using_dac)
3440*4882a593Smuzhiyun 		netdev->features |= NETIF_F_HIGHDMA;
3441*4882a593Smuzhiyun 
3442*4882a593Smuzhiyun 	netdev->mem_start = bnad->mmio_start;
3443*4882a593Smuzhiyun 	netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3444*4882a593Smuzhiyun 
3445*4882a593Smuzhiyun 	/* MTU range: 46 - 9000 */
3446*4882a593Smuzhiyun 	netdev->min_mtu = ETH_ZLEN - ETH_HLEN;
3447*4882a593Smuzhiyun 	netdev->max_mtu = BNAD_JUMBO_MTU;
3448*4882a593Smuzhiyun 
3449*4882a593Smuzhiyun 	netdev->netdev_ops = &bnad_netdev_ops;
3450*4882a593Smuzhiyun 	bnad_set_ethtool_ops(netdev);
3451*4882a593Smuzhiyun }
3452*4882a593Smuzhiyun 
3453*4882a593Smuzhiyun /*
3454*4882a593Smuzhiyun  * 1. Initialize the bnad structure
3455*4882a593Smuzhiyun  * 2. Setup netdev pointer in pci_dev
3456*4882a593Smuzhiyun  * 3. Initialize no. of TxQ & CQs & MSIX vectors
3457*4882a593Smuzhiyun  * 4. Initialize work queue.
3458*4882a593Smuzhiyun  */
3459*4882a593Smuzhiyun static int
3460*4882a593Smuzhiyun bnad_init(struct bnad *bnad,
3461*4882a593Smuzhiyun 	  struct pci_dev *pdev, struct net_device *netdev)
3462*4882a593Smuzhiyun {
3463*4882a593Smuzhiyun 	unsigned long flags;
3464*4882a593Smuzhiyun 
3465*4882a593Smuzhiyun 	SET_NETDEV_DEV(netdev, &pdev->dev);
3466*4882a593Smuzhiyun 	pci_set_drvdata(pdev, netdev);
3467*4882a593Smuzhiyun 
3468*4882a593Smuzhiyun 	bnad->netdev = netdev;
3469*4882a593Smuzhiyun 	bnad->pcidev = pdev;
3470*4882a593Smuzhiyun 	bnad->mmio_start = pci_resource_start(pdev, 0);
3471*4882a593Smuzhiyun 	bnad->mmio_len = pci_resource_len(pdev, 0);
3472*4882a593Smuzhiyun 	bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
3473*4882a593Smuzhiyun 	if (!bnad->bar0) {
3474*4882a593Smuzhiyun 		dev_err(&pdev->dev, "ioremap for bar0 failed\n");
3475*4882a593Smuzhiyun 		return -ENOMEM;
3476*4882a593Smuzhiyun 	}
3477*4882a593Smuzhiyun 	dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3478*4882a593Smuzhiyun 		 (unsigned long long) bnad->mmio_len);
3479*4882a593Smuzhiyun 
3480*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3481*4882a593Smuzhiyun 	if (!bnad_msix_disable)
3482*4882a593Smuzhiyun 		bnad->cfg_flags = BNAD_CF_MSIX;
3483*4882a593Smuzhiyun 
3484*4882a593Smuzhiyun 	bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3485*4882a593Smuzhiyun 
3486*4882a593Smuzhiyun 	bnad_q_num_init(bnad);
3487*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3488*4882a593Smuzhiyun 
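	/*
	 * Total MSI-X vectors: one per TxQ, one per Rx path (CQ),
	 * plus the vector(s) reserved for the mailbox.
	 */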
3489*4882a593Smuzhiyun 	bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3490*4882a593Smuzhiyun 		(bnad->num_rx * bnad->num_rxp_per_rx) +
3491*4882a593Smuzhiyun 			 BNAD_MAILBOX_MSIX_VECTORS;
3492*4882a593Smuzhiyun 
3493*4882a593Smuzhiyun 	bnad->txq_depth = BNAD_TXQ_DEPTH;
3494*4882a593Smuzhiyun 	bnad->rxq_depth = BNAD_RXQ_DEPTH;
3495*4882a593Smuzhiyun 
3496*4882a593Smuzhiyun 	bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3497*4882a593Smuzhiyun 	bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3498*4882a593Smuzhiyun 
3499*4882a593Smuzhiyun 	sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3500*4882a593Smuzhiyun 	bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3501*4882a593Smuzhiyun 	if (!bnad->work_q) {
3502*4882a593Smuzhiyun 		iounmap(bnad->bar0);
3503*4882a593Smuzhiyun 		return -ENOMEM;
3504*4882a593Smuzhiyun 	}
3505*4882a593Smuzhiyun 
3506*4882a593Smuzhiyun 	return 0;
3507*4882a593Smuzhiyun }
3508*4882a593Smuzhiyun 
3509*4882a593Smuzhiyun /*
3510*4882a593Smuzhiyun  * Must be called after bnad_pci_uninit()
3511*4882a593Smuzhiyun  * so that iounmap() and pci_set_drvdata(NULL)
3512*4882a593Smuzhiyun  * happen only after PCI uninitialization.
3513*4882a593Smuzhiyun  */
3514*4882a593Smuzhiyun static void
3515*4882a593Smuzhiyun bnad_uninit(struct bnad *bnad)
3516*4882a593Smuzhiyun {
3517*4882a593Smuzhiyun 	if (bnad->work_q) {
3518*4882a593Smuzhiyun 		flush_workqueue(bnad->work_q);
3519*4882a593Smuzhiyun 		destroy_workqueue(bnad->work_q);
3520*4882a593Smuzhiyun 		bnad->work_q = NULL;
3521*4882a593Smuzhiyun 	}
3522*4882a593Smuzhiyun 
3523*4882a593Smuzhiyun 	if (bnad->bar0)
3524*4882a593Smuzhiyun 		iounmap(bnad->bar0);
3525*4882a593Smuzhiyun }
3526*4882a593Smuzhiyun 
3527*4882a593Smuzhiyun /*
3528*4882a593Smuzhiyun  * Initialize locks
3529*4882a593Smuzhiyun 	a) Per-ioceth mutex used for serializing configuration
3530*4882a593Smuzhiyun 	   changes from OS interface
3531*4882a593Smuzhiyun 	b) spin lock used to protect bna state machine
3532*4882a593Smuzhiyun  */
3533*4882a593Smuzhiyun static void
3534*4882a593Smuzhiyun bnad_lock_init(struct bnad *bnad)
3535*4882a593Smuzhiyun {
3536*4882a593Smuzhiyun 	spin_lock_init(&bnad->bna_lock);
3537*4882a593Smuzhiyun 	mutex_init(&bnad->conf_mutex);
3538*4882a593Smuzhiyun }
3539*4882a593Smuzhiyun 
3540*4882a593Smuzhiyun static void
3541*4882a593Smuzhiyun bnad_lock_uninit(struct bnad *bnad)
3542*4882a593Smuzhiyun {
3543*4882a593Smuzhiyun 	mutex_destroy(&bnad->conf_mutex);
3544*4882a593Smuzhiyun }
3545*4882a593Smuzhiyun 
3546*4882a593Smuzhiyun /* PCI Initialization */
3547*4882a593Smuzhiyun static int
3548*4882a593Smuzhiyun bnad_pci_init(struct bnad *bnad,
3549*4882a593Smuzhiyun 	      struct pci_dev *pdev, bool *using_dac)
3550*4882a593Smuzhiyun {
3551*4882a593Smuzhiyun 	int err;
3552*4882a593Smuzhiyun 
3553*4882a593Smuzhiyun 	err = pci_enable_device(pdev);
3554*4882a593Smuzhiyun 	if (err)
3555*4882a593Smuzhiyun 		return err;
3556*4882a593Smuzhiyun 	err = pci_request_regions(pdev, BNAD_NAME);
3557*4882a593Smuzhiyun 	if (err)
3558*4882a593Smuzhiyun 		goto disable_device;
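	/*
	 * Try a 64-bit DMA mask first; if the platform or device
	 * rejects it, fall back to 32-bit DMA and report the outcome
	 * to the caller via *using_dac.
	 */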
3559*4882a593Smuzhiyun 	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
3560*4882a593Smuzhiyun 		*using_dac = true;
3561*4882a593Smuzhiyun 	} else {
3562*4882a593Smuzhiyun 		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3563*4882a593Smuzhiyun 		if (err)
3564*4882a593Smuzhiyun 			goto release_regions;
3565*4882a593Smuzhiyun 		*using_dac = false;
3566*4882a593Smuzhiyun 	}
3567*4882a593Smuzhiyun 	pci_set_master(pdev);
3568*4882a593Smuzhiyun 	return 0;
3569*4882a593Smuzhiyun 
3570*4882a593Smuzhiyun release_regions:
3571*4882a593Smuzhiyun 	pci_release_regions(pdev);
3572*4882a593Smuzhiyun disable_device:
3573*4882a593Smuzhiyun 	pci_disable_device(pdev);
3574*4882a593Smuzhiyun 
3575*4882a593Smuzhiyun 	return err;
3576*4882a593Smuzhiyun }
3577*4882a593Smuzhiyun 
3578*4882a593Smuzhiyun static void
3579*4882a593Smuzhiyun bnad_pci_uninit(struct pci_dev *pdev)
3580*4882a593Smuzhiyun {
3581*4882a593Smuzhiyun 	pci_release_regions(pdev);
3582*4882a593Smuzhiyun 	pci_disable_device(pdev);
3583*4882a593Smuzhiyun }
3584*4882a593Smuzhiyun 
3585*4882a593Smuzhiyun static int
3586*4882a593Smuzhiyun bnad_pci_probe(struct pci_dev *pdev,
3587*4882a593Smuzhiyun 		const struct pci_device_id *pcidev_id)
3588*4882a593Smuzhiyun {
3589*4882a593Smuzhiyun 	bool	using_dac;
3590*4882a593Smuzhiyun 	int	err;
3591*4882a593Smuzhiyun 	struct bnad *bnad;
3592*4882a593Smuzhiyun 	struct bna *bna;
3593*4882a593Smuzhiyun 	struct net_device *netdev;
3594*4882a593Smuzhiyun 	struct bfa_pcidev pcidev_info;
3595*4882a593Smuzhiyun 	unsigned long flags;
3596*4882a593Smuzhiyun 
3597*4882a593Smuzhiyun 	mutex_lock(&bnad_fwimg_mutex);
3598*4882a593Smuzhiyun 	if (!cna_get_firmware_buf(pdev)) {
3599*4882a593Smuzhiyun 		mutex_unlock(&bnad_fwimg_mutex);
3600*4882a593Smuzhiyun 		dev_err(&pdev->dev, "failed to load firmware image!\n");
3601*4882a593Smuzhiyun 		return -ENODEV;
3602*4882a593Smuzhiyun 	}
3603*4882a593Smuzhiyun 	mutex_unlock(&bnad_fwimg_mutex);
3604*4882a593Smuzhiyun 
3605*4882a593Smuzhiyun 	/*
3606*4882a593Smuzhiyun 	 * Allocates sizeof(struct net_device) + sizeof(struct bnad);
3607*4882a593Smuzhiyun 	 * bnad = netdev_priv(netdev)
3608*4882a593Smuzhiyun 	 */
3609*4882a593Smuzhiyun 	netdev = alloc_etherdev(sizeof(struct bnad));
3610*4882a593Smuzhiyun 	if (!netdev) {
3611*4882a593Smuzhiyun 		err = -ENOMEM;
3612*4882a593Smuzhiyun 		return err;
3613*4882a593Smuzhiyun 	}
3614*4882a593Smuzhiyun 	bnad = netdev_priv(netdev);
3615*4882a593Smuzhiyun 	bnad_lock_init(bnad);
3616*4882a593Smuzhiyun 	bnad->id = atomic_inc_return(&bna_id) - 1;
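	/* Zero-based adapter id, also used to name this adapter's workqueue */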
3617*4882a593Smuzhiyun 
3618*4882a593Smuzhiyun 	mutex_lock(&bnad->conf_mutex);
3619*4882a593Smuzhiyun 	/*
3620*4882a593Smuzhiyun 	 * PCI initialization
3621*4882a593Smuzhiyun 	 *	Output : using_dac = true  for 64-bit DMA
3622*4882a593Smuzhiyun 	 *			   = false for 32-bit DMA
3623*4882a593Smuzhiyun 	 */
3624*4882a593Smuzhiyun 	using_dac = false;
3625*4882a593Smuzhiyun 	err = bnad_pci_init(bnad, pdev, &using_dac);
3626*4882a593Smuzhiyun 	if (err)
3627*4882a593Smuzhiyun 		goto unlock_mutex;
3628*4882a593Smuzhiyun 
3629*4882a593Smuzhiyun 	/*
3630*4882a593Smuzhiyun 	 * Initialize bnad structure
3631*4882a593Smuzhiyun 	 * Setup relation between pci_dev & netdev
3632*4882a593Smuzhiyun 	 */
3633*4882a593Smuzhiyun 	err = bnad_init(bnad, pdev, netdev);
3634*4882a593Smuzhiyun 	if (err)
3635*4882a593Smuzhiyun 		goto pci_uninit;
3636*4882a593Smuzhiyun 
3637*4882a593Smuzhiyun 	/* Initialize netdev structure, set up ethtool ops */
3638*4882a593Smuzhiyun 	bnad_netdev_init(bnad, using_dac);
3639*4882a593Smuzhiyun 
3640*4882a593Smuzhiyun 	/* Set link to down state */
3641*4882a593Smuzhiyun 	netif_carrier_off(netdev);
3642*4882a593Smuzhiyun 
3643*4882a593Smuzhiyun 	/* Set up the debugfs node for this bnad */
3644*4882a593Smuzhiyun 	if (bna_debugfs_enable)
3645*4882a593Smuzhiyun 		bnad_debugfs_init(bnad);
3646*4882a593Smuzhiyun 
3647*4882a593Smuzhiyun 	/* Get resource requirements from bna */
3648*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3649*4882a593Smuzhiyun 	bna_res_req(&bnad->res_info[0]);
3650*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3651*4882a593Smuzhiyun 
3652*4882a593Smuzhiyun 	/* Allocate resources from bna */
3653*4882a593Smuzhiyun 	err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3654*4882a593Smuzhiyun 	if (err)
3655*4882a593Smuzhiyun 		goto drv_uninit;
3656*4882a593Smuzhiyun 
3657*4882a593Smuzhiyun 	bna = &bnad->bna;
3658*4882a593Smuzhiyun 
3659*4882a593Smuzhiyun 	/* Setup pcidev_info for bna_init() */
3660*4882a593Smuzhiyun 	pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3661*4882a593Smuzhiyun 	pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3662*4882a593Smuzhiyun 	pcidev_info.device_id = bnad->pcidev->device;
3663*4882a593Smuzhiyun 	pcidev_info.pci_bar_kva = bnad->bar0;
3664*4882a593Smuzhiyun 
3665*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3666*4882a593Smuzhiyun 	bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3667*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3668*4882a593Smuzhiyun 
3669*4882a593Smuzhiyun 	bnad->stats.bna_stats = &bna->stats;
3670*4882a593Smuzhiyun 
3671*4882a593Smuzhiyun 	bnad_enable_msix(bnad);
3672*4882a593Smuzhiyun 	err = bnad_mbox_irq_alloc(bnad);
3673*4882a593Smuzhiyun 	if (err)
3674*4882a593Smuzhiyun 		goto res_free;
3675*4882a593Smuzhiyun 
3676*4882a593Smuzhiyun 	/* Set up timers */
3677*4882a593Smuzhiyun 	timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
3678*4882a593Smuzhiyun 	timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
3679*4882a593Smuzhiyun 	timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
3680*4882a593Smuzhiyun 	timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3681*4882a593Smuzhiyun 		    0);
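	/*
	 * timer_setup() only registers the callbacks here; the timers
	 * are armed later by the IOC code, not started at probe time.
	 */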
3682*4882a593Smuzhiyun 
3683*4882a593Smuzhiyun 	/*
3684*4882a593Smuzhiyun 	 * Start the chip
3685*4882a593Smuzhiyun 	 * If the callback comes back with an error, it is catastrophic;
3686*4882a593Smuzhiyun 	 * we log it but still complete the probe (goto probe_success).
3687*4882a593Smuzhiyun 	 */
3688*4882a593Smuzhiyun 	err = bnad_ioceth_enable(bnad);
3689*4882a593Smuzhiyun 	if (err) {
3690*4882a593Smuzhiyun 		dev_err(&pdev->dev, "initialization failed err=%d\n", err);
3691*4882a593Smuzhiyun 		goto probe_success;
3692*4882a593Smuzhiyun 	}
3693*4882a593Smuzhiyun 
3694*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3695*4882a593Smuzhiyun 	if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3696*4882a593Smuzhiyun 		bna_num_rxp_set(bna, BNAD_NUM_RXP + 1)) {
3697*4882a593Smuzhiyun 		bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3698*4882a593Smuzhiyun 			bna_attr(bna)->num_rxp - 1);
3699*4882a593Smuzhiyun 		if (bna_num_txq_set(bna, BNAD_NUM_TXQ + 1) ||
3700*4882a593Smuzhiyun 			bna_num_rxp_set(bna, BNAD_NUM_RXP + 1))
3701*4882a593Smuzhiyun 			err = -EIO;
3702*4882a593Smuzhiyun 	}
3703*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3704*4882a593Smuzhiyun 	if (err)
3705*4882a593Smuzhiyun 		goto disable_ioceth;
3706*4882a593Smuzhiyun 
3707*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3708*4882a593Smuzhiyun 	bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3709*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3710*4882a593Smuzhiyun 
3711*4882a593Smuzhiyun 	err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3712*4882a593Smuzhiyun 	if (err) {
3713*4882a593Smuzhiyun 		err = -EIO;
3714*4882a593Smuzhiyun 		goto disable_ioceth;
3715*4882a593Smuzhiyun 	}
3716*4882a593Smuzhiyun 
3717*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3718*4882a593Smuzhiyun 	bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3719*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3720*4882a593Smuzhiyun 
3721*4882a593Smuzhiyun 	/* Get the burnt-in mac */
3722*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3723*4882a593Smuzhiyun 	bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3724*4882a593Smuzhiyun 	bnad_set_netdev_perm_addr(bnad);
3725*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3726*4882a593Smuzhiyun 
3727*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
3728*4882a593Smuzhiyun 
3729*4882a593Smuzhiyun 	/* Finally, register with the net_device layer */
3730*4882a593Smuzhiyun 	err = register_netdev(netdev);
3731*4882a593Smuzhiyun 	if (err) {
3732*4882a593Smuzhiyun 		dev_err(&pdev->dev, "registering net device failed\n");
3733*4882a593Smuzhiyun 		goto probe_uninit;
3734*4882a593Smuzhiyun 	}
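	/*
	 * Remember that the netdev is registered so bnad_pci_remove()
	 * knows whether it must call unregister_netdev().
	 */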
3735*4882a593Smuzhiyun 	set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3736*4882a593Smuzhiyun 
3737*4882a593Smuzhiyun 	return 0;
3738*4882a593Smuzhiyun 
3739*4882a593Smuzhiyun probe_success:
3740*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
3741*4882a593Smuzhiyun 	return 0;
3742*4882a593Smuzhiyun 
3743*4882a593Smuzhiyun probe_uninit:
3744*4882a593Smuzhiyun 	mutex_lock(&bnad->conf_mutex);
3745*4882a593Smuzhiyun 	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3746*4882a593Smuzhiyun disable_ioceth:
3747*4882a593Smuzhiyun 	bnad_ioceth_disable(bnad);
3748*4882a593Smuzhiyun 	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3749*4882a593Smuzhiyun 	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3750*4882a593Smuzhiyun 	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3751*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3752*4882a593Smuzhiyun 	bna_uninit(bna);
3753*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3754*4882a593Smuzhiyun 	bnad_mbox_irq_free(bnad);
3755*4882a593Smuzhiyun 	bnad_disable_msix(bnad);
3756*4882a593Smuzhiyun res_free:
3757*4882a593Smuzhiyun 	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3758*4882a593Smuzhiyun drv_uninit:
3759*4882a593Smuzhiyun 	/* Remove the debugfs node for this bnad */
3760*4882a593Smuzhiyun 	kfree(bnad->regdata);
3761*4882a593Smuzhiyun 	bnad_debugfs_uninit(bnad);
3762*4882a593Smuzhiyun 	bnad_uninit(bnad);
3763*4882a593Smuzhiyun pci_uninit:
3764*4882a593Smuzhiyun 	bnad_pci_uninit(pdev);
3765*4882a593Smuzhiyun unlock_mutex:
3766*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
3767*4882a593Smuzhiyun 	bnad_lock_uninit(bnad);
3768*4882a593Smuzhiyun 	free_netdev(netdev);
3769*4882a593Smuzhiyun 	return err;
3770*4882a593Smuzhiyun }
3771*4882a593Smuzhiyun 
3772*4882a593Smuzhiyun static void
3773*4882a593Smuzhiyun bnad_pci_remove(struct pci_dev *pdev)
3774*4882a593Smuzhiyun {
3775*4882a593Smuzhiyun 	struct net_device *netdev = pci_get_drvdata(pdev);
3776*4882a593Smuzhiyun 	struct bnad *bnad;
3777*4882a593Smuzhiyun 	struct bna *bna;
3778*4882a593Smuzhiyun 	unsigned long flags;
3779*4882a593Smuzhiyun 
3780*4882a593Smuzhiyun 	if (!netdev)
3781*4882a593Smuzhiyun 		return;
3782*4882a593Smuzhiyun 
3783*4882a593Smuzhiyun 	bnad = netdev_priv(netdev);
3784*4882a593Smuzhiyun 	bna = &bnad->bna;
3785*4882a593Smuzhiyun 
3786*4882a593Smuzhiyun 	if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3787*4882a593Smuzhiyun 		unregister_netdev(netdev);
3788*4882a593Smuzhiyun 
3789*4882a593Smuzhiyun 	mutex_lock(&bnad->conf_mutex);
3790*4882a593Smuzhiyun 	bnad_ioceth_disable(bnad);
3791*4882a593Smuzhiyun 	del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3792*4882a593Smuzhiyun 	del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3793*4882a593Smuzhiyun 	del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3794*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
3795*4882a593Smuzhiyun 	bna_uninit(bna);
3796*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
3797*4882a593Smuzhiyun 
3798*4882a593Smuzhiyun 	bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3799*4882a593Smuzhiyun 	bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3800*4882a593Smuzhiyun 	bnad_mbox_irq_free(bnad);
3801*4882a593Smuzhiyun 	bnad_disable_msix(bnad);
3802*4882a593Smuzhiyun 	bnad_pci_uninit(pdev);
3803*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
3804*4882a593Smuzhiyun 	bnad_lock_uninit(bnad);
3805*4882a593Smuzhiyun 	/* Remove the debugfs node for this bnad */
3806*4882a593Smuzhiyun 	kfree(bnad->regdata);
3807*4882a593Smuzhiyun 	bnad_debugfs_uninit(bnad);
3808*4882a593Smuzhiyun 	bnad_uninit(bnad);
3809*4882a593Smuzhiyun 	free_netdev(netdev);
3810*4882a593Smuzhiyun }
3811*4882a593Smuzhiyun 
3812*4882a593Smuzhiyun static const struct pci_device_id bnad_pci_id_table[] = {
3813*4882a593Smuzhiyun 	{
3814*4882a593Smuzhiyun 		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3815*4882a593Smuzhiyun 			PCI_DEVICE_ID_BROCADE_CT),
3816*4882a593Smuzhiyun 		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3817*4882a593Smuzhiyun 		.class_mask =  0xffff00
3818*4882a593Smuzhiyun 	},
3819*4882a593Smuzhiyun 	{
3820*4882a593Smuzhiyun 		PCI_DEVICE(PCI_VENDOR_ID_BROCADE,
3821*4882a593Smuzhiyun 			BFA_PCI_DEVICE_ID_CT2),
3822*4882a593Smuzhiyun 		.class = PCI_CLASS_NETWORK_ETHERNET << 8,
3823*4882a593Smuzhiyun 		.class_mask =  0xffff00
3824*4882a593Smuzhiyun 	},
3825*4882a593Smuzhiyun 	{0,  },
3826*4882a593Smuzhiyun };
3827*4882a593Smuzhiyun 
3828*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, bnad_pci_id_table);
3829*4882a593Smuzhiyun 
3830*4882a593Smuzhiyun static struct pci_driver bnad_pci_driver = {
3831*4882a593Smuzhiyun 	.name = BNAD_NAME,
3832*4882a593Smuzhiyun 	.id_table = bnad_pci_id_table,
3833*4882a593Smuzhiyun 	.probe = bnad_pci_probe,
3834*4882a593Smuzhiyun 	.remove = bnad_pci_remove,
3835*4882a593Smuzhiyun };
3836*4882a593Smuzhiyun 
3837*4882a593Smuzhiyun static int __init
3838*4882a593Smuzhiyun bnad_module_init(void)
3839*4882a593Smuzhiyun {
3840*4882a593Smuzhiyun 	int err;
3841*4882a593Smuzhiyun 
3842*4882a593Smuzhiyun 	bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
3843*4882a593Smuzhiyun 
3844*4882a593Smuzhiyun 	err = pci_register_driver(&bnad_pci_driver);
3845*4882a593Smuzhiyun 	if (err < 0) {
3846*4882a593Smuzhiyun 		pr_err("bna: PCI driver registration failed err=%d\n", err);
3847*4882a593Smuzhiyun 		return err;
3848*4882a593Smuzhiyun 	}
3849*4882a593Smuzhiyun 
3850*4882a593Smuzhiyun 	return 0;
3851*4882a593Smuzhiyun }
3852*4882a593Smuzhiyun 
3853*4882a593Smuzhiyun static void __exit
3854*4882a593Smuzhiyun bnad_module_exit(void)
3855*4882a593Smuzhiyun {
3856*4882a593Smuzhiyun 	pci_unregister_driver(&bnad_pci_driver);
3857*4882a593Smuzhiyun 	release_firmware(bfi_fw);
3858*4882a593Smuzhiyun }
3859*4882a593Smuzhiyun 
3860*4882a593Smuzhiyun module_init(bnad_module_init);
3861*4882a593Smuzhiyun module_exit(bnad_module_exit);
3862*4882a593Smuzhiyun 
3863*4882a593Smuzhiyun MODULE_AUTHOR("Brocade");
3864*4882a593Smuzhiyun MODULE_LICENSE("GPL");
3865*4882a593Smuzhiyun MODULE_DESCRIPTION("QLogic BR-series 10G PCIe Ethernet driver");
3866*4882a593Smuzhiyun MODULE_FIRMWARE(CNA_FW_FILE_CT);
3867*4882a593Smuzhiyun MODULE_FIRMWARE(CNA_FW_FILE_CT2);
3868