// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

#include "ixgbe.h"
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <generated/utsrelease.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 *
 * Returns : none
 *
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
	ddp->len = 0;
	ddp->err = 1;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->sgl = NULL;
	ddp->sgc = 0;
}

/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by ULD, i.e., FCP layer of libfc
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_hw *hw;
	u32 fcbuff;

	if (!netdev)
		return 0;

	if (xid >= netdev->fcoe_ddp_xid)
		return 0;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		return 0;

	hw = &adapter->hw;
	len = ddp->len;
	/* if no error then skip ddp context invalidation */
	if (!ddp->err)
		goto skip_ddpinv;

	if (hw->mac.type == ixgbe_mac_X550) {
		/* X550 does not require DDP FCoE lock */

		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid),
				(xid | IXGBE_FCFLTRW_WE));

		/* program FCBUFF */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0);

		/* program FCDMARW */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
				(xid | IXGBE_FCDMARW_WE));

		/* read FCBUFF to check context invalidated */
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid),
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid));
	} else {
		/* other hardware requires DDP FCoE lock */
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* guaranteed to be invalidated after 100us */
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
	}

	if (fcbuff & IXGBE_FCBUFF_VALID)
		usleep_range(100, 150);

skip_ddpinv:
	if (ddp->sgl)
		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);

	return len;
}

/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to setup target mode, 0 to setup initiator mode
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= netdev->fcoe_ddp_xid) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	if (!fcoe->ddp_pool) {
		e_warn(drv, "No ddp_pool resources allocated\n");
		return 0;
	}

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
	if (!ddp_pool->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		goto out_noddp;
	}

	/* setup dma from scsi command sgl */
	dmacount = dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		goto out_noddp;
	}

	/* alloc the udl from the per-cpu ddp pool */
	ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed to allocate ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = ddp_pool->pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				ddp_pool->noddp++;
				goto out_noddp_free;
			}

			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
	/* only the last buffer may have non-full bufflen */
	lastsize = thisoff + thislen;

	/*
	 * lastsize can not be bufflen.
	 * If it is, then add another buffer with lastsize = 1.
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
			ddp_pool->noddp_ext_buff++;
			goto out_noddp_free;
		}

		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
	put_cpu();

	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	/* Set WRCONTX bit to allow DDP for target */
	if (target_mode)
		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
	fcbuff |= (IXGBE_FCBUFF_VALID);

	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;

	/* program DMA context */
	hw = &adapter->hw;

	/* turn on last frame indication for target mode, as the target is
	 * supposed to send FCP_RSP when it is done. */
	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
	}

	if (hw->mac.type == ixgbe_mac_X550) {
		/* X550 does not require DDP lock */

		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid),
				ddp->udp & DMA_BIT_MASK(32));
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32);
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff);
		IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw);
		/* program filter context */
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw);
	} else {
		/* DDP lock for indirect DDP context access */
		spin_lock_bh(&fcoe->lock);

		IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
		IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
		IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
		IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
		/* program filter context */
		IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
		IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

		spin_unlock_bh(&fcoe->lock);
	}

	return 1;

out_noddp_free:
	dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
out_noddp:
	put_cpu();
	return 0;
}
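
/*
 * Worked example of the layout rules enforced above, with illustrative
 * addresses (bufflen = IXGBE_FCBUFF_MIN = 4096 = 0x1000): a single SG
 * element of length 0x2200 at bus address 0x10000e00 is split as
 *
 *   j = 0: thisoff = 0xe00, thislen = 0x0200, udl[0] = 0x10000000
 *   j = 1: thisoff = 0x000, thislen = 0x1000, udl[1] = 0x10001000
 *   j = 2: thisoff = 0x000, thislen = 0x1000, udl[2] = 0x10002000
 *
 * giving firstoff = 0xe00 and lastsize = 0x1000 == bufflen, which forces
 * the shared extra_ddp_buffer to be appended as udl[3] and lastsize to be
 * rewritten to 1.  Only the first buffer may start at a non-zero offset
 * and only the last may end short of bufflen; any other shape bails out
 * to out_noddp_free.
 */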

/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}

/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. The DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
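
/*
 * Usage sketch: roughly how an upper-layer driver such as libfc's FCP
 * layer is expected to drive the ndo hooks implemented above for one
 * initiator read I/O.  This fragment is illustrative only and is not
 * compiled (hence the #if 0); the xid and scatterlist come from the ULD,
 * and the function name is hypothetical.
 */
#if 0
static void example_fcp_read(struct net_device *netdev, u16 xid,
			     struct scatterlist *sgl, unsigned int sgc)
{
	const struct net_device_ops *ops = netdev->netdev_ops;

	/* ask hardware to place the read data directly into sgl */
	if (!ops->ndo_fcoe_ddp_setup(netdev, xid, sgl, sgc))
		return;	/* no ddp: data arrives via the normal rx path */

	/* ... I/O completes, FCP_RSP received ... */

	/* release the context; the return value is bytes already placed */
	pr_info("xid 0x%x: %d bytes ddp-ed\n", xid,
		ops->ndo_fcoe_ddp_done(netdev, xid));
}
#endif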

/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not an FCoE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates the length of data
 * being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb)
{
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof *crc;
	__le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR);
	__le32 ddp_err;
	int ddp_max;
	u32 fctl;
	u16 xid;

	if (fcerr == cpu_to_le32(IXGBE_FCERR_BADCRC))
		skb->ip_summed = CHECKSUM_NONE;
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));

	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid =  be16_to_cpu(fh->fh_ox_id);
	else
		xid =  be16_to_cpu(fh->fh_rx_id);

	ddp_max = IXGBE_FCOE_DDP_MAX;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		ddp_max = IXGBE_FCOE_DDP_MAX_X550;
	if (xid >= ddp_max)
		return -EINVAL;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		return -EINVAL;

	ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
					      IXGBE_RXDADV_ERR_FCERR);
	if (ddp_err)
		return -EINVAL;

	switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
	/* return 0 to bypass going to ULD for DDPed data */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_DDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		rc = 0;
		break;
	/* unmap the sg list when FCPRSP is received */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP):
		dma_unmap_sg(&adapter->pdev->dev, ddp->sgl,
			     ddp->sgc, DMA_FROM_DEVICE);
		ddp->err = (__force u32)ddp_err;
		ddp->sgl = NULL;
		ddp->sgc = 0;
		fallthrough;
	/* if DDP length is present pass it through to ULD */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NODDP):
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		if (ddp->len)
			rc = ddp->len;
		break;
	/* no match will return as an error */
	case cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_NOMTCH):
	default:
		break;
	}

	/* In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, data is already DDPed but the header
	 * indication of the last data frame would allow us to tell if we
	 * got all the data and the ULP can send FCP_RSP back. As this is
	 * not a full fcoe frame, we fill in the trailer here so it won't
	 * be dropped by the ULP stack.
	 */
	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
	    (fctl & FC_FC_END_SEQ)) {
		skb_linearize(skb);
		crc = skb_put(skb, sizeof(*crc));
		crc->fcoe_eof = FC_EOF_T;
	}

	return rc;
}
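
/*
 * Return-value contract sketch for the rx cleanup path (illustrative and
 * simplified, not a real call site; the local variable name is
 * hypothetical):
 */
#if 0
	int ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);

	if (ddp_bytes > 0) {
		/* hardware already placed ddp_bytes of data for the ULD */
	} else if (!ddp_bytes) {
		/* DDPed frame with nothing for the ULD: consume the skb */
		dev_kfree_skb_any(skb);
	} else {
		/* < 0: error or not an FCoE DDP frame; hand skb up normally */
	}
#endif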

/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates success, < 0 for error
 */
int ixgbe_fso(struct ixgbe_ring *tx_ring,
	      struct ixgbe_tx_buffer *first,
	      u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct fc_frame_header *fh;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof = 0;
	u32 mss_l4len_idx;
	u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE;
	u8 sof, eof;

	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
		dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
			skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* reset the headers to point at the fcoe/fc headers */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));

	/* sets up SOF and ORIS */
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF |
			       IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof = IXGBE_ADVTXD_FCOEF_SOF;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}

	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);
	/* sets up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb))
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N |
					IXGBE_ADVTXD_FCOEF_ORIE;
		else
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
		break;
	default:
		dev_warn(tx_ring->dev, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}

	/* sets up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

	/* include trailer in headlen as it is replicated per frame */
	*hdr_len = sizeof(struct fcoe_crc_eof);

	/* hdr_len includes fc_hdr if FCoE LSO is enabled */
	if (skb_is_gso(skb)) {
		*hdr_len += skb_transport_offset(skb) +
			    sizeof(struct fc_frame_header);
		/* update gso_segs and bytecount */
		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
					       skb_shinfo(skb)->gso_size);
		first->bytecount += (first->gso_segs - 1) * *hdr_len;
		first->tx_flags |= IXGBE_TX_FLAGS_TSO;
		/* Hardware expects L4T to be RSV for FCoE TSO */
		type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV;
	}

	/* set flag indicating FCOE to ixgbe_tx_map call */
	first->tx_flags |= IXGBE_TX_FLAGS_FCOE | IXGBE_TX_FLAGS_CC;

	/* mss_l4len_idx: use 0 for FSO as TSO, no need for L4LEN */
	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;

	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = skb_transport_offset(skb) +
			  sizeof(struct fc_frame_header);
	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
			   << IXGBE_ADVTXD_MACLEN_SHIFT;
	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;

	/* write context desc */
	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
			  type_tucmd, mss_l4len_idx);

	return 0;
}
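
/*
 * Worked example of the LSO accounting above (illustrative sizes): with a
 * 14-byte Ethernet header, sizeof(struct fcoe_hdr) == 14 and
 * sizeof(struct fc_frame_header) == 24, skb_transport_offset() is 28 and
 * a gso skb gets *hdr_len = 8 + 28 + 24 = 60 (CRC/EOF trailer plus all
 * headers).  For gso_size = 2048 and 4096 payload bytes (skb->len = 4156),
 * gso_segs = DIV_ROUND_UP(4156 - 60, 2048) = 2 and bytecount grows by
 * (2 - 1) * 60 = 60, the headers replicated onto the extra frame.
 */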

static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu)
{
	struct ixgbe_fcoe_ddp_pool *ddp_pool;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	dma_pool_destroy(ddp_pool->pool);
	ddp_pool->pool = NULL;
}

static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe,
				     struct device *dev,
				     unsigned int cpu)
{
	struct ixgbe_fcoe_ddp_pool *ddp_pool;
	struct dma_pool *pool;
	char pool_name[32];

	snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu);

	pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX,
			       IXGBE_FCPTR_ALIGN, PAGE_SIZE);
	if (!pool)
		return -ENOMEM;

	ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
	ddp_pool->pool = pool;
	ddp_pool->noddp = 0;
	ddp_pool->noddp_ext_buff = 0;

	return 0;
}

/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE];
	struct ixgbe_hw *hw = &adapter->hw;
	int i, fcoe_q, fcoe_i, fcoe_q_h = 0;
	int fcreta_size;
	u32 etqf;

	/* Minimal functionality for FCoE requires at least CRC offloads */
	if (!(adapter->netdev->features & NETIF_F_FCOE_CRC))
		return;

	/* Enable L2 EtherType filter for FCoE, needed for FCoE CRC and DDP */
	etqf = ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf);
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);

	/* leave registers un-configured if FCoE is disabled */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return;

	/* Use one or more Rx queues for FCoE by redirection table */
	fcreta_size = IXGBE_FCRETA_SIZE;
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		fcreta_size = IXGBE_FCRETA_SIZE_X550;

	for (i = 0; i < fcreta_size; i++) {
		if (adapter->hw.mac.type == ixgbe_mac_X550) {
			int fcoe_i_h = fcoe->offset + ((i + fcreta_size) %
							fcoe->indices);
			fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx;
			fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) &
				   IXGBE_FCRETA_ENTRY_HIGH_MASK;
		}

		fcoe_i = fcoe->offset + (i % fcoe->indices);
		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		fcoe_q |= fcoe_q_h;
		IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
	}
	IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);

	/* Enable L2 EtherType filter for FIP */
	etqf = ETH_P_FIP | IXGBE_ETQF_FILTER_EN;
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		etqf |= IXGBE_ETQF_POOL_ENABLE;
		etqf |= VMDQ_P(0) << IXGBE_ETQF_POOL_SHIFT;
	}
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP), etqf);

	/* Send FIP frames to the first FCoE queue */
	fcoe_q = adapter->rx_ring[fcoe->offset]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

	/* Configure FCoE Rx control */
	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
			IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
}
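
/*
 * Worked example of the redirection table fill above (illustrative ring
 * layout): with fcoe->offset = 8 and fcoe->indices = 4, FCRETA entry i
 * selects rx_ring[8 + (i % 4)]->reg_idx, spreading FCoE exchanges
 * round-robin across four queues; on X550 each entry additionally carries
 * a second "high" queue taken from rx_ring[8 + ((i + fcreta_size) % 4)].
 */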

/**
 * ixgbe_free_fcoe_ddp_resources - release all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	int cpu, i, ddp_max;

	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return;

	ddp_max = IXGBE_FCOE_DDP_MAX;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		ddp_max = IXGBE_FCOE_DDP_MAX_X550;

	for (i = 0; i < ddp_max; i++)
		ixgbe_fcoe_ddp_put(adapter->netdev, i);

	for_each_possible_cpu(cpu)
		ixgbe_fcoe_dma_pool_free(fcoe, cpu);

	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
	kfree(fcoe->extra_ddp_buffer);

	fcoe->extra_ddp_buffer = NULL;
	fcoe->extra_ddp_buffer_dma = 0;
}

/**
 * ixgbe_setup_fcoe_ddp_resources - setup all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Sets up ddp context resources
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	struct device *dev = &adapter->pdev->dev;
	void *buffer;
	dma_addr_t dma;
	unsigned int cpu;

	/* do nothing if no DDP pools were allocated */
	if (!fcoe->ddp_pool)
		return 0;

	/* Extra buffer to be shared by all DDPs for HW work around */
	buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		e_err(drv, "failed to map extra DDP buffer\n");
		kfree(buffer);
		return -ENOMEM;
	}

	fcoe->extra_ddp_buffer = buffer;
	fcoe->extra_ddp_buffer_dma = dma;

	/* allocate pci pool for each cpu */
	for_each_possible_cpu(cpu) {
		int err = ixgbe_fcoe_dma_pool_alloc(fcoe, dev, cpu);

		if (!err)
			continue;

		e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
		ixgbe_free_fcoe_ddp_resources(adapter);
		return -ENOMEM;
	}

	return 0;
}
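
/*
 * Lifecycle sketch (illustrative ordering, not a real call site): the
 * per-cpu bookkeeping allocated by ixgbe_fcoe_ddp_enable() below must
 * exist before the DMA pools are created above, and teardown runs in
 * reverse.  Both setup and free bail out early when ddp_pool is NULL,
 * which is what makes this ordering safe.
 */
#if 0
	ixgbe_fcoe_ddp_enable(adapter);		/* alloc_percpu bookkeeping */
	ixgbe_setup_fcoe_ddp_resources(adapter);	/* dma pools + extra buffer */
	/* ... FCoE traffic ... */
	ixgbe_free_fcoe_ddp_resources(adapter);	/* unwind contexts and pools */
	ixgbe_fcoe_ddp_disable(adapter);	/* free_percpu bookkeeping */
#endif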

static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		return -EINVAL;

	fcoe->ddp_pool = alloc_percpu(struct ixgbe_fcoe_ddp_pool);

	if (!fcoe->ddp_pool) {
		e_err(drv, "failed to allocate percpu DDP resources\n");
		return -ENOMEM;
	}

	adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;
	/* X550 has different DDP Max limit */
	if (adapter->hw.mac.type == ixgbe_mac_X550)
		adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1;

	return 0;
}

static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	adapter->netdev->fcoe_ddp_xid = 0;

	if (!fcoe->ddp_pool)
		return;

	free_percpu(fcoe->ddp_pool);
	fcoe->ddp_pool = NULL;
}

/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_enable(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	atomic_inc(&fcoe->refcnt);

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		return -EINVAL;

	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		return -EINVAL;

	e_info(drv, "Enabling FCoE offload features.\n");

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	/* Allocate per CPU memory to track DDP pools */
	ixgbe_fcoe_ddp_enable(adapter);

	/* enable FCoE and notify stack */
	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
	netdev->features |= NETIF_F_FCOE_MTU;
	netdev_features_change(netdev);

	/* release existing queues and reallocate them */
	ixgbe_clear_interrupt_scheme(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);

	return 0;
}

/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_disable(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (!atomic_dec_and_test(&adapter->fcoe.refcnt))
		return -EINVAL;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return -EINVAL;

	e_info(drv, "Disabling FCoE offload features.\n");
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	/* Free per CPU memory to track DDP pools */
	ixgbe_fcoe_ddp_disable(adapter);

	/* disable FCoE and notify stack */
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
	netdev->features &= ~NETIF_F_FCOE_MTU;

	netdev_features_change(netdev);

	/* release existing queues and reallocate them */
	ixgbe_clear_interrupt_scheme(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);

	return 0;
}

/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev : ixgbe adapter
 * @wwn : the world wide name
 * @type: the type of world wide name
 *
 * Returns the node or port world wide name. If both the prefix and the
 * san mac address are valid, the wwn is formed based on the NAA-2 for
 * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3).
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	u16 prefix = 0xffff;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		prefix = mac->wwnn_prefix;
		break;
	case NETDEV_FCOE_WWPN:
		prefix = mac->wwpn_prefix;
		break;
	default:
		break;
	}

	if ((prefix != 0xffff) &&
	    is_valid_ether_addr(mac->san_addr)) {
		*wwn = ((u64) prefix << 48) |
		       ((u64) mac->san_addr[0] << 40) |
		       ((u64) mac->san_addr[1] << 32) |
		       ((u64) mac->san_addr[2] << 24) |
		       ((u64) mac->san_addr[3] << 16) |
		       ((u64) mac->san_addr[4] << 8)  |
		       ((u64) mac->san_addr[5]);
		return 0;
	}
	return -EINVAL;
}
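
/*
 * Worked example of the NAA-2 formation above (illustrative SAN MAC): with
 * wwpn_prefix = 0x2000 and san_addr = 00:1b:21:aa:bb:cc the resulting name
 * is 0x2000001b21aabbcc, i.e. the 16-bit prefix occupies bits 63:48 and
 * the 48-bit SAN MAC address fills the low bits.
 */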

/**
 * ixgbe_fcoe_get_hbainfo - get FCoE HBA information
 * @netdev : ixgbe adapter
 * @info : HBA information
 *
 * Returns ixgbe HBA information
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u64 dsn;

	if (!info)
		return -EINVAL;

	/* Don't return information on unsupported devices */
	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		return -EINVAL;

	/* Manufacturer */
	snprintf(info->manufacturer, sizeof(info->manufacturer),
		 "Intel Corporation");

	/* Serial Number */

	/* Get the PCI-e Device Serial Number Capability */
	dsn = pci_get_dsn(adapter->pdev);
	if (dsn)
		snprintf(info->serial_number, sizeof(info->serial_number),
			 "%016llX", dsn);
	else
		snprintf(info->serial_number, sizeof(info->serial_number),
			 "Unknown");

	/* Hardware Version */
	snprintf(info->hardware_version,
		 sizeof(info->hardware_version),
		 "Rev %d", hw->revision_id);
	/* Driver Name/Version */
	snprintf(info->driver_version,
		 sizeof(info->driver_version),
		 "%s v%s",
		 ixgbe_driver_name,
		 UTS_RELEASE);
	/* Firmware Version */
	strlcpy(info->firmware_version, adapter->eeprom_id,
		sizeof(info->firmware_version));

	/* Model */
	if (hw->mac.type == ixgbe_mac_82599EB) {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel 82599");
	} else if (hw->mac.type == ixgbe_mac_X550) {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel X550");
	} else {
		snprintf(info->model,
			 sizeof(info->model),
			 "Intel X540");
	}

	/* Model Description */
	snprintf(info->model_description,
		 sizeof(info->model_description),
		 "%s",
		 ixgbe_default_device_descr);

	return 0;
}

/**
 * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to
 * @adapter: pointer to the device adapter structure
 *
 * Return : TC that FCoE is mapped to
 */
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter)
{
#ifdef CONFIG_IXGBE_DCB
	return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up);
#else
	return 0;
#endif
}