/*
 * This file is part of the Chelsio T4 PCI-E SR-IOV Virtual Function Ethernet
 * driver for Linux.
 *
 * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>

#include "t4vf_common.h"
#include "t4vf_defs.h"

#include "../cxgb4/t4_regs.h"
#include "../cxgb4/t4_msg.h"

/*
 * Generic information about the driver.
 */
#define DRV_DESC "Chelsio T4/T5/T6 Virtual Function (VF) Network Driver"

/*
 * Module Parameters.
 * ==================
 */

/*
 * Default ethtool "message level" for adapters.
 */
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X then MSI.  This parameter determines which of these schemes the
 * driver may consider as follows:
 *
 *     msi = 2: choose from among MSI-X and MSI
 *     msi = 1: only consider MSI interrupts
 *
 * Note that unlike the Physical Function driver, this Virtual Function driver
 * does _not_ support legacy INTx interrupts (this limitation is mandated by
 * the PCI-E SR-IOV standard).
 */
#define MSI_MSIX	2
#define MSI_MSI		1
#define MSI_DEFAULT	MSI_MSIX

static int msi = MSI_DEFAULT;

module_param(msi, int, 0644);
MODULE_PARM_DESC(msi, "whether to use MSI-X or MSI");
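
/*
 * Usage sketch (assuming the module is built and installed as cxgb4vf.ko):
 * because "msi" is declared writable (0644) above, it can be supplied at
 * load time or changed later through sysfs, for example:
 *
 *     modprobe cxgb4vf msi=1                       # consider MSI only
 *     echo 2 > /sys/module/cxgb4vf/parameters/msi  # prefer MSI-X again
 *
 * A sysfs write only affects adapters probed after the change.
 */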

/*
 * Fundamental constants.
 * ======================
 */

enum {
	MAX_TXQ_ENTRIES		= 16384,
	MAX_RSPQ_ENTRIES	= 16384,
	MAX_RX_BUFFERS		= 16384,

	MIN_TXQ_ENTRIES		= 32,
	MIN_RSPQ_ENTRIES	= 128,
	MIN_FL_ENTRIES		= 16,

	/*
	 * For purposes of manipulating the Free List size we need to
	 * recognize that Free Lists are actually Egress Queues (the host
	 * produces free buffers which the hardware consumes), Egress Queues
	 * indices are all in units of Egress Context Units bytes, and free
	 * list entries are 64-bit PCI DMA addresses.  And since the state of
	 * the Producer Index == the Consumer Index implies an EMPTY list, we
	 * always have at least one Egress Unit's worth of Free List entries
	 * unused.  See sge.c for more details ...
	 */
	EQ_UNIT = SGE_EQ_IDXSIZE,
	FL_PER_EQ_UNIT = EQ_UNIT / sizeof(__be64),
	MIN_FL_RESID = FL_PER_EQ_UNIT,
};
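
/*
 * Worked example for the constants above (assuming SGE_EQ_IDXSIZE is 64
 * bytes, its usual value for these adapters): EQ_UNIT = 64, so
 * FL_PER_EQ_UNIT = 64 / sizeof(__be64) = 8 and MIN_FL_RESID = 8, i.e. one
 * Egress Unit's worth of Free List entries is always left unused so that
 * the Producer Index never wraps around onto the Consumer Index.
 */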

/*
 * Global driver state.
 * ====================
 */

static struct dentry *cxgb4vf_debugfs_root;

/*
 * OS "Callback" functions.
 * ========================
 */

/*
 * The link status has changed on the indicated "port" (Virtual Interface).
 */
void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
{
	struct net_device *dev = adapter->port[pidx];

	/*
	 * If the port is disabled or the current recorded "link up"
	 * status matches the new status, just return.
	 */
	if (!netif_running(dev) || link_ok == netif_carrier_ok(dev))
		return;

	/*
	 * Tell the OS that the link status has changed and print a short
	 * informative message on the console about the event.
	 */
	if (link_ok) {
		const char *s;
		const char *fc;
		const struct port_info *pi = netdev_priv(dev);

		netif_carrier_on(dev);

		switch (pi->link_cfg.speed) {
		case 100:
			s = "100Mbps";
			break;
		case 1000:
			s = "1Gbps";
			break;
		case 10000:
			s = "10Gbps";
			break;
		case 25000:
			s = "25Gbps";
			break;
		case 40000:
			s = "40Gbps";
			break;
		case 100000:
			s = "100Gbps";
			break;

		default:
			s = "unknown";
			break;
		}

		switch ((int)pi->link_cfg.fc) {
		case PAUSE_RX:
			fc = "RX";
			break;

		case PAUSE_TX:
			fc = "TX";
			break;

		case PAUSE_RX | PAUSE_TX:
			fc = "RX/TX";
			break;

		default:
			fc = "no";
			break;
		}

		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s, fc);
	} else {
		netif_carrier_off(dev);
		netdev_info(dev, "link down\n");
	}
}

/*
 * The port module type has changed on the indicated "port" (Virtual
 * Interface).
 */
void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
{
	static const char * const mod_str[] = {
		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
	};
	const struct net_device *dev = adapter->port[pidx];
	const struct port_info *pi = netdev_priv(dev);

	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
		dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
			 dev->name);
	else if (pi->mod_type < ARRAY_SIZE(mod_str))
		dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
			 dev->name, mod_str[pi->mod_type]);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		dev_info(adapter->pdev_dev, "%s: unsupported optical port "
			 "module inserted\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		dev_info(adapter->pdev_dev, "%s: unknown port module inserted, "
			 "forcing TWINAX\n", dev->name);
	else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
		dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
			 dev->name);
	else
		dev_info(adapter->pdev_dev, "%s: unknown module type %d "
			 "inserted\n", dev->name, pi->mod_type);
}

static int cxgb4vf_set_addr_hash(struct port_info *pi)
{
	struct adapter *adapter = pi->adapter;
	u64 vec = 0;
	bool ucast = false;
	struct hash_mac_addr *entry;

	/* Calculate the hash vector for the updated list and program it */
	list_for_each_entry(entry, &adapter->mac_hlist, list) {
		ucast |= is_unicast_ether_addr(entry->addr);
		vec |= (1ULL << hash_mac_addr(entry->addr));
	}
	return t4vf_set_addr_hash(adapter, pi->viid, ucast, vec, false);
}

/**
 *	cxgb4vf_change_mac - Update match filter for a MAC address.
 *	@pi: the port_info
 *	@viid: the VI id
 *	@tcam_idx: TCAM index of existing filter for old value of MAC address,
 *		   or -1
 *	@addr: the new MAC address value
 *	@persistent: whether a new MAC allocation should be persistent
 *
 *	Modifies an MPS filter and sets it to the new MAC address if
 *	@tcam_idx >= 0, or adds the MAC address to a new filter if
 *	@tcam_idx < 0. In the latter case the address is added persistently
 *	if @persistent is %true.
 *	Addresses are programmed to the hash region if the TCAM runs out
 *	of entries.
 *
 */
static int cxgb4vf_change_mac(struct port_info *pi, unsigned int viid,
			      int *tcam_idx, const u8 *addr, bool persistent)
{
	struct hash_mac_addr *new_entry, *entry;
	struct adapter *adapter = pi->adapter;
	int ret;

	ret = t4vf_change_mac(adapter, viid, *tcam_idx, addr, persistent);
	/* We ran out of TCAM entries. try programming hash region. */
	if (ret == -ENOMEM) {
		/* If the MAC address to be updated is in the hash addr
		 * list, update it from the list
		 */
		list_for_each_entry(entry, &adapter->mac_hlist, list) {
			if (entry->iface_mac) {
				ether_addr_copy(entry->addr, addr);
				goto set_hash;
			}
		}
		new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, addr);
		new_entry->iface_mac = true;
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
set_hash:
		ret = cxgb4vf_set_addr_hash(pi);
	} else if (ret >= 0) {
		*tcam_idx = ret;
		ret = 0;
	}

	return ret;
}

/*
 * Net device operations.
 * ======================
 */




/*
 * Perform the MAC and PHY actions needed to enable a "port" (Virtual
 * Interface).
 */
static int link_start(struct net_device *dev)
{
	int ret;
	struct port_info *pi = netdev_priv(dev);

	/*
	 * We do not set address filters and promiscuity here, the stack does
	 * that step explicitly. Enable vlan accel.
	 */
	ret = t4vf_set_rxmode(pi->adapter, pi->viid, dev->mtu, -1, -1, -1, 1,
			      true);
	if (ret == 0)
		ret = cxgb4vf_change_mac(pi, pi->viid,
					 &pi->xact_addr_filt,
					 dev->dev_addr, true);

	/*
	 * We don't need to actually "start the link" itself since the
	 * firmware will do that for us when the first Virtual Interface
	 * is enabled on a port.
	 */
	if (ret == 0)
		ret = t4vf_enable_pi(pi->adapter, pi, true, true);

	return ret;
}

/*
 * Name the MSI-X interrupts.
 */
static void name_msix_vecs(struct adapter *adapter)
{
	int namelen = sizeof(adapter->msix_info[0].desc) - 1;
	int pidx;

	/*
	 * Firmware events.
	 */
	snprintf(adapter->msix_info[MSIX_FW].desc, namelen,
		 "%s-FWeventq", adapter->name);
	adapter->msix_info[MSIX_FW].desc[namelen] = 0;

	/*
	 * Ethernet queues.
	 */
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		const struct port_info *pi = netdev_priv(dev);
		int qs, msi;

		for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
			snprintf(adapter->msix_info[msi].desc, namelen,
				 "%s-%d", dev->name, qs);
			adapter->msix_info[msi].desc[namelen] = 0;
		}
	}
}

/*
 * Request all of our MSI-X resources.
 */
static int request_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi, err;

	/*
	 * Firmware events.
	 */
	err = request_irq(adapter->msix_info[MSIX_FW].vec, t4vf_sge_intr_msix,
			  0, adapter->msix_info[MSIX_FW].desc, &s->fw_evtq);
	if (err)
		return err;

	/*
	 * Ethernet queues.
	 */
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq) {
		err = request_irq(adapter->msix_info[msi].vec,
				  t4vf_sge_intr_msix, 0,
				  adapter->msix_info[msi].desc,
				  &s->ethrxq[rxq].rspq);
		if (err)
			goto err_free_irqs;
		msi++;
	}
	return 0;

err_free_irqs:
	while (--rxq >= 0)
		free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);
	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	return err;
}

/*
 * Free our MSI-X resources.
 */
static void free_msix_queue_irqs(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq, msi;

	free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
	msi = MSIX_IQFLINT;
	for_each_ethrxq(s, rxq)
		free_irq(adapter->msix_info[msi++].vec,
			 &s->ethrxq[rxq].rspq);
}

/*
 * Turn on NAPI and start up interrupts on a response queue.
 */
static void qenable(struct sge_rspq *rspq)
{
	napi_enable(&rspq->napi);

	/*
	 * 0-increment the Going To Sleep register to start the timer and
	 * enable interrupts.
	 */
	t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
		     CIDXINC_V(0) |
		     SEINTARM_V(rspq->intr_params) |
		     INGRESSQID_V(rspq->cntxt_id));
}

/*
 * Enable NAPI scheduling and interrupt generation for all Receive Queues.
 */
static void enable_rx(struct adapter *adapter)
{
	int rxq;
	struct sge *s = &adapter->sge;

	for_each_ethrxq(s, rxq)
		qenable(&s->ethrxq[rxq].rspq);
	qenable(&s->fw_evtq);

	/*
	 * The interrupt queue doesn't use NAPI so we do the 0-increment of
	 * its Going To Sleep register here to get it started.
	 */
	if (adapter->flags & CXGB4VF_USING_MSI)
		t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
			     CIDXINC_V(0) |
			     SEINTARM_V(s->intrq.intr_params) |
			     INGRESSQID_V(s->intrq.cntxt_id));

}

/*
 * Wait until all NAPI handlers are descheduled.
 */
static void quiesce_rx(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int rxq;

	for_each_ethrxq(s, rxq)
		napi_disable(&s->ethrxq[rxq].rspq.napi);
	napi_disable(&s->fw_evtq.napi);
}

/*
 * Response queue handler for the firmware event queue.
 */
static int fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp,
			  const struct pkt_gl *gl)
{
	/*
	 * Extract response opcode and get pointer to CPL message body.
	 */
	struct adapter *adapter = rspq->adapter;
	u8 opcode = ((const struct rss_header *)rsp)->opcode;
	void *cpl = (void *)(rsp + 1);

	switch (opcode) {
	case CPL_FW6_MSG: {
		/*
		 * We've received an asynchronous message from the firmware.
		 */
		const struct cpl_fw6_msg *fw_msg = cpl;
		if (fw_msg->type == FW6_TYPE_CMD_RPL)
			t4vf_handle_fw_rpl(adapter, fw_msg->data);
		break;
	}

	case CPL_FW4_MSG: {
		/* FW can send EGR_UPDATEs encapsulated in a CPL_FW4_MSG.
		 */
		const struct cpl_sge_egr_update *p = (void *)(rsp + 3);
		opcode = CPL_OPCODE_G(ntohl(p->opcode_qid));
		if (opcode != CPL_SGE_EGR_UPDATE) {
			dev_err(adapter->pdev_dev, "unexpected FW4/CPL %#x on FW event queue\n"
				, opcode);
			break;
		}
		cpl = (void *)p;
	}
		fallthrough;

	case CPL_SGE_EGR_UPDATE: {
		/*
		 * We've received an Egress Queue Status Update message.  We
		 * get these, if the SGE is configured to send these when the
		 * firmware passes certain points in processing our TX
		 * Ethernet Queue or if we make an explicit request for one.
		 * We use these updates to determine when we may need to
		 * restart a TX Ethernet Queue which was stopped for lack of
		 * free TX Queue Descriptors ...
		 */
		const struct cpl_sge_egr_update *p = cpl;
		unsigned int qid = EGR_QID_G(be32_to_cpu(p->opcode_qid));
		struct sge *s = &adapter->sge;
		struct sge_txq *tq;
		struct sge_eth_txq *txq;
		unsigned int eq_idx;

		/*
		 * Perform sanity checking on the Queue ID to make sure it
		 * really refers to one of our TX Ethernet Egress Queues which
		 * is active and matches the queue's ID.  None of these error
		 * conditions should ever happen so we may want to either make
		 * them fatal and/or conditionalized under DEBUG.
		 */
		eq_idx = EQ_IDX(s, qid);
		if (unlikely(eq_idx >= MAX_EGRQ)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d out of range\n", qid);
			break;
		}
		tq = s->egr_map[eq_idx];
		if (unlikely(tq == NULL)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d TXQ=NULL\n", qid);
			break;
		}
		txq = container_of(tq, struct sge_eth_txq, q);
		if (unlikely(tq->abs_id != qid)) {
			dev_err(adapter->pdev_dev,
				"Egress Update QID %d refers to TXQ %d\n",
				qid, tq->abs_id);
			break;
		}

		/*
		 * Restart a stopped TX Queue which has less than half of its
		 * TX ring in use ...
		 */
		txq->q.restarts++;
		netif_tx_wake_queue(txq->txq);
		break;
	}

	default:
		dev_err(adapter->pdev_dev,
			"unexpected CPL %#x on FW event queue\n", opcode);
	}

	return 0;
}

/*
 * Allocate SGE TX/RX response queues.  Determine how many sets of SGE queues
 * to use and initialize them.  We support multiple "Queue Sets" per port if
 * we have MSI-X, otherwise just one queue set per port.
 */
static int setup_sge_queues(struct adapter *adapter)
{
	struct sge *s = &adapter->sge;
	int err, pidx, msix;

	/*
	 * Clear "Queue Set" Free List Starving and TX Queue Mapping Error
	 * state.
	 */
	bitmap_zero(s->starving_fl, MAX_EGRQ);

	/*
	 * If we're using MSI interrupt mode we need to set up a "forwarded
	 * interrupt" queue which we'll set up with our MSI vector.  The rest
	 * of the ingress queues will be set up to forward their interrupts to
	 * this queue ...  This must be first since t4vf_sge_alloc_rxq() uses
	 * the intrq's queue ID as the interrupt forwarding queue for the
	 * subsequent calls ...
	 */
	if (adapter->flags & CXGB4VF_USING_MSI) {
		err = t4vf_sge_alloc_rxq(adapter, &s->intrq, false,
					 adapter->port[0], 0, NULL, NULL);
		if (err)
			goto err_free_queues;
	}

	/*
	 * Allocate our ingress queue for asynchronous firmware messages.
	 */
	err = t4vf_sge_alloc_rxq(adapter, &s->fw_evtq, true, adapter->port[0],
				 MSIX_FW, NULL, fwevtq_handler);
	if (err)
		goto err_free_queues;

	/*
	 * Allocate each "port"'s initial Queue Sets.  These can be changed
	 * later on ... up to the point where any interface on the adapter is
	 * brought up at which point lots of things get nailed down
	 * permanently ...
	 */
	msix = MSIX_IQFLINT;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			err = t4vf_sge_alloc_rxq(adapter, &rxq->rspq, false,
						 dev, msix++,
						 &rxq->fl, t4vf_ethrx_handler);
			if (err)
				goto err_free_queues;

			err = t4vf_sge_alloc_eth_txq(adapter, txq, dev,
					     netdev_get_tx_queue(dev, qs),
					     s->fw_evtq.cntxt_id);
			if (err)
				goto err_free_queues;

			rxq->rspq.idx = qs;
			memset(&rxq->stats, 0, sizeof(rxq->stats));
		}
	}

	/*
	 * Create the reverse mappings for the queues.
	 */
	s->egr_base = s->ethtxq[0].q.abs_id - s->ethtxq[0].q.cntxt_id;
	s->ingr_base = s->ethrxq[0].rspq.abs_id - s->ethrxq[0].rspq.cntxt_id;
	IQ_MAP(s, s->fw_evtq.abs_id) = &s->fw_evtq;
	for_each_port(adapter, pidx) {
		struct net_device *dev = adapter->port[pidx];
		struct port_info *pi = netdev_priv(dev);
		struct sge_eth_rxq *rxq = &s->ethrxq[pi->first_qset];
		struct sge_eth_txq *txq = &s->ethtxq[pi->first_qset];
		int qs;

		for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
			IQ_MAP(s, rxq->rspq.abs_id) = &rxq->rspq;
			EQ_MAP(s, txq->q.abs_id) = &txq->q;

			/*
			 * The FW_IQ_CMD doesn't return the Absolute Queue IDs
			 * for Free Lists but since all of the Egress Queues
			 * (including Free Lists) have Relative Queue IDs
			 * which are computed as Absolute - Base Queue ID, we
			 * can synthesize the Absolute Queue IDs for the Free
			 * Lists.  This is useful for debugging purposes when
			 * we want to dump Queue Contexts via the PF Driver.
			 */
			rxq->fl.abs_id = rxq->fl.cntxt_id + s->egr_base;
			EQ_MAP(s, rxq->fl.abs_id) = &rxq->fl;
		}
	}
	return 0;

err_free_queues:
	t4vf_free_sge_resources(adapter);
	return err;
}

/*
 * Set up Receive Side Scaling (RSS) to distribute packets to multiple receive
 * queues.  We configure the RSS CPU lookup table to distribute to the number
 * of HW receive queues, and the response queue lookup table to narrow that
 * down to the response queues actually configured for each "port" (Virtual
 * Interface).  We always configure the RSS mapping for all ports since the
 * mapping table has plenty of entries.
 */
static int setup_rss(struct adapter *adapter)
{
	int pidx;

	for_each_port(adapter, pidx) {
		struct port_info *pi = adap2pinfo(adapter, pidx);
		struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
		u16 rss[MAX_PORT_QSETS];
		int qs, err;

		for (qs = 0; qs < pi->nqsets; qs++)
			rss[qs] = rxq[qs].rspq.abs_id;

		err = t4vf_config_rss_range(adapter, pi->viid,
					    0, pi->rss_size, rss, pi->nqsets);
		if (err)
			return err;

		/*
		 * Perform Global RSS Mode-specific initialization.
		 */
		switch (adapter->params.rss.mode) {
		case FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL:
			/*
			 * If Tunnel All Lookup isn't specified in the global
			 * RSS Configuration, then we need to specify a
			 * default Ingress Queue for any ingress packets which
			 * aren't hashed.  We'll use our first ingress queue
			 * ...
			 */
			if (!adapter->params.rss.u.basicvirtual.tnlalllookup) {
				union rss_vi_config config;
				err = t4vf_read_rss_vi_config(adapter,
							      pi->viid,
							      &config);
				if (err)
					return err;
				config.basicvirtual.defaultq =
					rxq[0].rspq.abs_id;
				err = t4vf_write_rss_vi_config(adapter,
							       pi->viid,
							       &config);
				if (err)
					return err;
			}
			break;
		}
	}

	return 0;
}

/*
 * Bring the adapter up.  Called whenever we go from no "ports" open to having
 * one open.  This function performs the actions necessary to make an adapter
 * operational, such as completing the initialization of HW modules, and
 * enabling interrupts.  Must be called with the rtnl lock held.  (Note that
 * this is called "cxgb_up" in the PF Driver.)
 */
static int adapter_up(struct adapter *adapter)
{
	int err;

	/*
	 * If this is the first time we've been called, perform basic
	 * adapter setup.  Once we've done this, many of our adapter
	 * parameters can no longer be changed ...
	 */
	if ((adapter->flags & CXGB4VF_FULL_INIT_DONE) == 0) {
		err = setup_sge_queues(adapter);
		if (err)
			return err;
		err = setup_rss(adapter);
		if (err) {
			t4vf_free_sge_resources(adapter);
			return err;
		}

		if (adapter->flags & CXGB4VF_USING_MSIX)
			name_msix_vecs(adapter);

		adapter->flags |= CXGB4VF_FULL_INIT_DONE;
	}

	/*
	 * Acquire our interrupt resources.  We only support MSI-X and MSI.
	 */
	BUG_ON((adapter->flags &
	       (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
	if (adapter->flags & CXGB4VF_USING_MSIX)
		err = request_msix_queue_irqs(adapter);
	else
		err = request_irq(adapter->pdev->irq,
				  t4vf_intr_handler(adapter), 0,
				  adapter->name, adapter);
	if (err) {
		dev_err(adapter->pdev_dev, "request_irq failed, err %d\n",
			err);
		return err;
	}

	/*
	 * Enable NAPI ingress processing and return success.
	 */
	enable_rx(adapter);
	t4vf_sge_start(adapter);

	return 0;
}

/*
 * Bring the adapter down.  Called whenever the last "port" (Virtual
 * Interface) closed.  (Note that this routine is called "cxgb_down" in the PF
 * Driver.)
 */
static void adapter_down(struct adapter *adapter)
{
	/*
	 * Free interrupt resources.
	 */
	if (adapter->flags & CXGB4VF_USING_MSIX)
		free_msix_queue_irqs(adapter);
	else
		free_irq(adapter->pdev->irq, adapter);

	/*
	 * Wait for NAPI handlers to finish.
	 */
	quiesce_rx(adapter);
}

/*
 * Start up a net device.
 */
static int cxgb4vf_open(struct net_device *dev)
{
	int err;
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	/*
	 * If we don't have a connection to the firmware there's nothing we
	 * can do.
	 */
	if (!(adapter->flags & CXGB4VF_FW_OK))
		return -ENXIO;

	/*
	 * If this is the first interface that we're opening on the "adapter",
	 * bring the "adapter" up now.
	 */
	if (adapter->open_device_map == 0) {
		err = adapter_up(adapter);
		if (err)
			return err;
	}

	/* It's possible that the basic port information could have
	 * changed since we first read it.
	 */
	err = t4vf_update_port_info(pi);
	if (err < 0)
		goto err_unwind;

	/*
	 * Note that this interface is up and start everything up ...
	 */
	err = link_start(dev);
	if (err)
		goto err_unwind;

	pi->vlan_id = t4vf_get_vf_vlan_acl(adapter);

	netif_tx_start_all_queues(dev);
	set_bit(pi->port_id, &adapter->open_device_map);
	return 0;

err_unwind:
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return err;
}

/*
 * Shut down a net device.  This routine is called "cxgb_close" in the PF
 * Driver ...
 */
static int cxgb4vf_stop(struct net_device *dev)
{
	struct port_info *pi = netdev_priv(dev);
	struct adapter *adapter = pi->adapter;

	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);
	t4vf_enable_pi(adapter, pi, false, false);

	clear_bit(pi->port_id, &adapter->open_device_map);
	if (adapter->open_device_map == 0)
		adapter_down(adapter);
	return 0;
}

/*
 * Translate our basic statistics into the standard "ifconfig" statistics.
 */
static struct net_device_stats *cxgb4vf_get_stats(struct net_device *dev)
{
	struct t4vf_port_stats stats;
	struct port_info *pi = netdev2pinfo(dev);
	struct adapter *adapter = pi->adapter;
	struct net_device_stats *ns = &dev->stats;
	int err;

	spin_lock(&adapter->stats_lock);
	err = t4vf_get_port_stats(adapter, pi->pidx, &stats);
	spin_unlock(&adapter->stats_lock);

	memset(ns, 0, sizeof(*ns));
	if (err)
		return ns;

	ns->tx_bytes = (stats.tx_bcast_bytes + stats.tx_mcast_bytes +
			stats.tx_ucast_bytes + stats.tx_offload_bytes);
	ns->tx_packets = (stats.tx_bcast_frames + stats.tx_mcast_frames +
			  stats.tx_ucast_frames + stats.tx_offload_frames);
	ns->rx_bytes = (stats.rx_bcast_bytes + stats.rx_mcast_bytes +
			stats.rx_ucast_bytes);
	ns->rx_packets = (stats.rx_bcast_frames + stats.rx_mcast_frames +
			  stats.rx_ucast_frames);
	ns->multicast = stats.rx_mcast_frames;
	ns->tx_errors = stats.tx_drop_frames;
	ns->rx_errors = stats.rx_err_frames;

	return ns;
}

static int cxgb4vf_mac_sync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	int ret;
	u64 mhash = 0;
	u64 uhash = 0;
	bool free = false;
	bool ucast = is_unicast_ether_addr(mac_addr);
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *new_entry;

	ret = t4vf_alloc_mac_filt(adapter, pi->viid, free, 1, maclist,
				  NULL, ucast ? &uhash : &mhash, false);
	if (ret < 0)
		goto out;
	/* if hash != 0, then add the addr to the hash addr list
	 * so at the end we will calculate the hash for the
	 * list and program it
	 */
	if (uhash || mhash) {
		new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC);
		if (!new_entry)
			return -ENOMEM;
		ether_addr_copy(new_entry->addr, mac_addr);
		list_add_tail(&new_entry->list, &adapter->mac_hlist);
		ret = cxgb4vf_set_addr_hash(pi);
	}
out:
	return ret < 0 ? ret : 0;
}

static int cxgb4vf_mac_unsync(struct net_device *netdev, const u8 *mac_addr)
{
	struct port_info *pi = netdev_priv(netdev);
	struct adapter *adapter = pi->adapter;
	int ret;
	const u8 *maclist[1] = {mac_addr};
	struct hash_mac_addr *entry, *tmp;

	/* If the MAC address to be removed is in the hash addr
	 * list, delete it from the list and update hash vector
	 */
	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
		if (ether_addr_equal(entry->addr, mac_addr)) {
			list_del(&entry->list);
			kfree(entry);
			return cxgb4vf_set_addr_hash(pi);
		}
	}

	ret = t4vf_free_mac_filt(adapter, pi->viid, 1, maclist, false);
	return ret < 0 ? -EINVAL : 0;
}

/*
 * Set RX properties of a port, such as promiscuity, address filters, and MTU.
 * If @mtu is -1 it is left unchanged.
 */
static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
{
	struct port_info *pi = netdev_priv(dev);

	__dev_uc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
	__dev_mc_sync(dev, cxgb4vf_mac_sync, cxgb4vf_mac_unsync);
	return t4vf_set_rxmode(pi->adapter, pi->viid, -1,
			       (dev->flags & IFF_PROMISC) != 0,
			       (dev->flags & IFF_ALLMULTI) != 0,
			       1, -1, sleep_ok);
}

/*
 * Set the current receive modes on the device.
 */
static void cxgb4vf_set_rxmode(struct net_device *dev)
{
	/* unfortunately we can't return errors to the stack */
	set_rxmode(dev, -1, false);
}

/*
 * Find the entry in the interrupt holdoff timer value array which comes
 * closest to the specified interrupt holdoff value.
 */
static int closest_timer(const struct sge *s, int us)
{
	int i, timer_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
		int delta = us - s->timer_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			timer_idx = i;
		}
	}
	return timer_idx;
}

static int closest_thres(const struct sge *s, int thres)
{
	int i, delta, pktcnt_idx = 0, min_delta = INT_MAX;

	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
		delta = thres - s->counter_val[i];
		if (delta < 0)
			delta = -delta;
		if (delta < min_delta) {
			min_delta = delta;
			pktcnt_idx = i;
		}
	}
	return pktcnt_idx;
}

/*
 * Return a queue's interrupt hold-off time in us.  0 means no timer.
 */
static unsigned int qtimer_val(const struct adapter *adapter,
			       const struct sge_rspq *rspq)
{
	unsigned int timer_idx = QINTR_TIMER_IDX_G(rspq->intr_params);

	return timer_idx < SGE_NTIMERS
		? adapter->sge.timer_val[timer_idx]
		: 0;
}

/**
 *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
 *	@adapter: the adapter
 *	@rspq: the RX response queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
 *
 *	Sets an RX response queue's interrupt hold-off time and packet count.
 *	At least one of the two needs to be enabled for the queue to generate
 *	interrupts.
 */
static int set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq,
			       unsigned int us, unsigned int cnt)
{
	unsigned int timer_idx;

	/*
	 * If both the interrupt holdoff timer and count are specified as
	 * zero, default to a holdoff count of 1 ...
	 */
	if ((us | cnt) == 0)
		cnt = 1;

	/*
	 * If an interrupt holdoff count has been specified, then find the
	 * closest configured holdoff count and use that.  If the response
	 * queue has already been created, then update its queue context
	 * parameters ...
	 */
	if (cnt) {
		int err;
		u32 v, pktcnt_idx;

		pktcnt_idx = closest_thres(&adapter->sge, cnt);
		if (rspq->desc && rspq->pktcnt_idx != pktcnt_idx) {
			v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
			    FW_PARAMS_PARAM_X_V(
					FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
			    FW_PARAMS_PARAM_YZ_V(rspq->cntxt_id);
			err = t4vf_set_params(adapter, 1, &v, &pktcnt_idx);
			if (err)
				return err;
		}
		rspq->pktcnt_idx = pktcnt_idx;
	}

	/*
	 * Compute the closest holdoff timer index from the supplied holdoff
	 * timer value.
	 */
	timer_idx = (us == 0
		     ? SGE_TIMER_RSTRT_CNTR
		     : closest_timer(&adapter->sge, us));

	/*
	 * Update the response queue's interrupt coalescing parameters and
	 * return success.
	 */
	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
			     QINTR_CNT_EN_V(cnt > 0));
	return 0;
}

/*
 * Return a version number to identify the type of adapter.  The scheme is:
 * - bits 0..9: chip version
 * - bits 10..15: chip revision
 */
static inline unsigned int mk_adap_vers(const struct adapter *adapter)
{
	/*
	 * Chip version 4, revision 0x3f (cxgb4vf).
	 */
	return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
}
1139*4882a593Smuzhiyun 
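/*
 * For reference only: a consumer of the ethtool register dump could decode
 * the value produced above according to the bit layout documented before
 * mk_adap_vers().  A minimal sketch, with "vers" standing in for the
 * reported version word:
 *
 *	unsigned int chip_version  = vers & 0x3ff;		/* bits 0..9 */
 *	unsigned int chip_revision = (vers >> 10) & 0x3f;	/* bits 10..15 */
 */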
1140*4882a593Smuzhiyun /*
1141*4882a593Smuzhiyun  * Execute the specified ioctl command.
1142*4882a593Smuzhiyun  */
1143*4882a593Smuzhiyun static int cxgb4vf_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1144*4882a593Smuzhiyun {
1145*4882a593Smuzhiyun 	int ret = 0;
1146*4882a593Smuzhiyun 
1147*4882a593Smuzhiyun 	switch (cmd) {
1148*4882a593Smuzhiyun 	    /*
1149*4882a593Smuzhiyun 	     * The VF Driver doesn't have access to any of the other
1150*4882a593Smuzhiyun 	     * common Ethernet device ioctl()'s (like reading/writing
1151*4882a593Smuzhiyun 	     * PHY registers, etc.).
1152*4882a593Smuzhiyun 	     */
1153*4882a593Smuzhiyun 
1154*4882a593Smuzhiyun 	default:
1155*4882a593Smuzhiyun 		ret = -EOPNOTSUPP;
1156*4882a593Smuzhiyun 		break;
1157*4882a593Smuzhiyun 	}
1158*4882a593Smuzhiyun 	return ret;
1159*4882a593Smuzhiyun }
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun /*
1162*4882a593Smuzhiyun  * Change the device's MTU.
1163*4882a593Smuzhiyun  */
1164*4882a593Smuzhiyun static int cxgb4vf_change_mtu(struct net_device *dev, int new_mtu)
1165*4882a593Smuzhiyun {
1166*4882a593Smuzhiyun 	int ret;
1167*4882a593Smuzhiyun 	struct port_info *pi = netdev_priv(dev);
1168*4882a593Smuzhiyun 
1169*4882a593Smuzhiyun 	ret = t4vf_set_rxmode(pi->adapter, pi->viid, new_mtu,
1170*4882a593Smuzhiyun 			      -1, -1, -1, -1, true);
1171*4882a593Smuzhiyun 	if (!ret)
1172*4882a593Smuzhiyun 		dev->mtu = new_mtu;
1173*4882a593Smuzhiyun 	return ret;
1174*4882a593Smuzhiyun }
1175*4882a593Smuzhiyun 
1176*4882a593Smuzhiyun static netdev_features_t cxgb4vf_fix_features(struct net_device *dev,
1177*4882a593Smuzhiyun 	netdev_features_t features)
1178*4882a593Smuzhiyun {
1179*4882a593Smuzhiyun 	/*
1180*4882a593Smuzhiyun 	 * Since there is no support for separate rx/tx vlan accel
1181*4882a593Smuzhiyun 	 * enable/disable make sure tx flag is always in same state as rx.
1182*4882a593Smuzhiyun 	 */
1183*4882a593Smuzhiyun 	if (features & NETIF_F_HW_VLAN_CTAG_RX)
1184*4882a593Smuzhiyun 		features |= NETIF_F_HW_VLAN_CTAG_TX;
1185*4882a593Smuzhiyun 	else
1186*4882a593Smuzhiyun 		features &= ~NETIF_F_HW_VLAN_CTAG_TX;
1187*4882a593Smuzhiyun 
1188*4882a593Smuzhiyun 	return features;
1189*4882a593Smuzhiyun }
1190*4882a593Smuzhiyun 
1191*4882a593Smuzhiyun static int cxgb4vf_set_features(struct net_device *dev,
1192*4882a593Smuzhiyun 	netdev_features_t features)
1193*4882a593Smuzhiyun {
1194*4882a593Smuzhiyun 	struct port_info *pi = netdev_priv(dev);
1195*4882a593Smuzhiyun 	netdev_features_t changed = dev->features ^ features;
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	if (changed & NETIF_F_HW_VLAN_CTAG_RX)
1198*4882a593Smuzhiyun 		t4vf_set_rxmode(pi->adapter, pi->viid, -1, -1, -1, -1,
1199*4882a593Smuzhiyun 				features & NETIF_F_HW_VLAN_CTAG_TX, 0);
1200*4882a593Smuzhiyun 
1201*4882a593Smuzhiyun 	return 0;
1202*4882a593Smuzhiyun }
1203*4882a593Smuzhiyun 
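/*
 * Example (illustrative): because cxgb4vf_fix_features() keeps the TX flag
 * in lock-step with the RX flag, a request such as
 *
 *	ethtool -K ethX rxvlan off
 *
 * also clears NETIF_F_HW_VLAN_CTAG_TX, and cxgb4vf_set_features() then
 * pushes the new VLAN extraction setting to the Virtual Interface via
 * t4vf_set_rxmode().  "ethX" is a placeholder interface name.
 */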
1204*4882a593Smuzhiyun /*
1205*4882a593Smuzhiyun  * Change the device's MAC address.
1206*4882a593Smuzhiyun  */
1207*4882a593Smuzhiyun static int cxgb4vf_set_mac_addr(struct net_device *dev, void *_addr)
1208*4882a593Smuzhiyun {
1209*4882a593Smuzhiyun 	int ret;
1210*4882a593Smuzhiyun 	struct sockaddr *addr = _addr;
1211*4882a593Smuzhiyun 	struct port_info *pi = netdev_priv(dev);
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 	if (!is_valid_ether_addr(addr->sa_data))
1214*4882a593Smuzhiyun 		return -EADDRNOTAVAIL;
1215*4882a593Smuzhiyun 
1216*4882a593Smuzhiyun 	ret = cxgb4vf_change_mac(pi, pi->viid, &pi->xact_addr_filt,
1217*4882a593Smuzhiyun 				 addr->sa_data, true);
1218*4882a593Smuzhiyun 	if (ret < 0)
1219*4882a593Smuzhiyun 		return ret;
1220*4882a593Smuzhiyun 
1221*4882a593Smuzhiyun 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1222*4882a593Smuzhiyun 	return 0;
1223*4882a593Smuzhiyun }
1224*4882a593Smuzhiyun 
1225*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
1226*4882a593Smuzhiyun /*
1227*4882a593Smuzhiyun  * Poll all of our receive queues.  This is called outside of normal interrupt
1228*4882a593Smuzhiyun  * context.
1229*4882a593Smuzhiyun  */
1230*4882a593Smuzhiyun static void cxgb4vf_poll_controller(struct net_device *dev)
1231*4882a593Smuzhiyun {
1232*4882a593Smuzhiyun 	struct port_info *pi = netdev_priv(dev);
1233*4882a593Smuzhiyun 	struct adapter *adapter = pi->adapter;
1234*4882a593Smuzhiyun 
1235*4882a593Smuzhiyun 	if (adapter->flags & CXGB4VF_USING_MSIX) {
1236*4882a593Smuzhiyun 		struct sge_eth_rxq *rxq;
1237*4882a593Smuzhiyun 		int nqsets;
1238*4882a593Smuzhiyun 
1239*4882a593Smuzhiyun 		rxq = &adapter->sge.ethrxq[pi->first_qset];
1240*4882a593Smuzhiyun 		for (nqsets = pi->nqsets; nqsets; nqsets--) {
1241*4882a593Smuzhiyun 			t4vf_sge_intr_msix(0, &rxq->rspq);
1242*4882a593Smuzhiyun 			rxq++;
1243*4882a593Smuzhiyun 		}
1244*4882a593Smuzhiyun 	} else
1245*4882a593Smuzhiyun 		t4vf_intr_handler(adapter)(0, adapter);
1246*4882a593Smuzhiyun }
1247*4882a593Smuzhiyun #endif
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun /*
1250*4882a593Smuzhiyun  * Ethtool operations.
1251*4882a593Smuzhiyun  * ===================
1252*4882a593Smuzhiyun  *
1253*4882a593Smuzhiyun  * Note that we don't support any ethtool operations which change the physical
1254*4882a593Smuzhiyun  * state of the port to which we're linked.
1255*4882a593Smuzhiyun  */
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun /**
1258*4882a593Smuzhiyun  *	from_fw_port_mod_type - translate Firmware Port/Module type to Ethtool
1259*4882a593Smuzhiyun  *	@port_type: Firmware Port Type
1260*4882a593Smuzhiyun  *	@mod_type: Firmware Module Type
1261*4882a593Smuzhiyun  *
1262*4882a593Smuzhiyun  *	Translate Firmware Port/Module type to Ethtool Port Type.
1263*4882a593Smuzhiyun  */
1264*4882a593Smuzhiyun static int from_fw_port_mod_type(enum fw_port_type port_type,
1265*4882a593Smuzhiyun 				 enum fw_port_module_type mod_type)
1266*4882a593Smuzhiyun {
1267*4882a593Smuzhiyun 	if (port_type == FW_PORT_TYPE_BT_SGMII ||
1268*4882a593Smuzhiyun 	    port_type == FW_PORT_TYPE_BT_XFI ||
1269*4882a593Smuzhiyun 	    port_type == FW_PORT_TYPE_BT_XAUI) {
1270*4882a593Smuzhiyun 		return PORT_TP;
1271*4882a593Smuzhiyun 	} else if (port_type == FW_PORT_TYPE_FIBER_XFI ||
1272*4882a593Smuzhiyun 		   port_type == FW_PORT_TYPE_FIBER_XAUI) {
1273*4882a593Smuzhiyun 		return PORT_FIBRE;
1274*4882a593Smuzhiyun 	} else if (port_type == FW_PORT_TYPE_SFP ||
1275*4882a593Smuzhiyun 		   port_type == FW_PORT_TYPE_QSFP_10G ||
1276*4882a593Smuzhiyun 		   port_type == FW_PORT_TYPE_QSA ||
1277*4882a593Smuzhiyun 		   port_type == FW_PORT_TYPE_QSFP ||
1278*4882a593Smuzhiyun 		   port_type == FW_PORT_TYPE_CR4_QSFP ||
1279*4882a593Smuzhiyun 		   port_type == FW_PORT_TYPE_CR_QSFP ||
1280*4882a593Smuzhiyun 		   port_type == FW_PORT_TYPE_CR2_QSFP ||
1281*4882a593Smuzhiyun 		   port_type == FW_PORT_TYPE_SFP28) {
1282*4882a593Smuzhiyun 		if (mod_type == FW_PORT_MOD_TYPE_LR ||
1283*4882a593Smuzhiyun 		    mod_type == FW_PORT_MOD_TYPE_SR ||
1284*4882a593Smuzhiyun 		    mod_type == FW_PORT_MOD_TYPE_ER ||
1285*4882a593Smuzhiyun 		    mod_type == FW_PORT_MOD_TYPE_LRM)
1286*4882a593Smuzhiyun 			return PORT_FIBRE;
1287*4882a593Smuzhiyun 		else if (mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1288*4882a593Smuzhiyun 			 mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1289*4882a593Smuzhiyun 			return PORT_DA;
1290*4882a593Smuzhiyun 		else
1291*4882a593Smuzhiyun 			return PORT_OTHER;
1292*4882a593Smuzhiyun 	} else if (port_type == FW_PORT_TYPE_KR4_100G ||
1293*4882a593Smuzhiyun 		   port_type == FW_PORT_TYPE_KR_SFP28 ||
1294*4882a593Smuzhiyun 		   port_type == FW_PORT_TYPE_KR_XLAUI) {
1295*4882a593Smuzhiyun 		return PORT_NONE;
1296*4882a593Smuzhiyun 	}
1297*4882a593Smuzhiyun 
1298*4882a593Smuzhiyun 	return PORT_OTHER;
1299*4882a593Smuzhiyun }
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun /**
1302*4882a593Smuzhiyun  *	fw_caps_to_lmm - translate Firmware to ethtool Link Mode Mask
1303*4882a593Smuzhiyun  *	@port_type: Firmware Port Type
1304*4882a593Smuzhiyun  *	@fw_caps: Firmware Port Capabilities
1305*4882a593Smuzhiyun  *	@link_mode_mask: ethtool Link Mode Mask
1306*4882a593Smuzhiyun  *
1307*4882a593Smuzhiyun  *	Translate a Firmware Port Capabilities specification to an ethtool
1308*4882a593Smuzhiyun  *	Link Mode Mask.
1309*4882a593Smuzhiyun  */
1310*4882a593Smuzhiyun static void fw_caps_to_lmm(enum fw_port_type port_type,
1311*4882a593Smuzhiyun 			   unsigned int fw_caps,
1312*4882a593Smuzhiyun 			   unsigned long *link_mode_mask)
1313*4882a593Smuzhiyun {
1314*4882a593Smuzhiyun 	#define SET_LMM(__lmm_name) \
1315*4882a593Smuzhiyun 		__set_bit(ETHTOOL_LINK_MODE_ ## __lmm_name ## _BIT, \
1316*4882a593Smuzhiyun 			  link_mode_mask)
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	#define FW_CAPS_TO_LMM(__fw_name, __lmm_name) \
1319*4882a593Smuzhiyun 		do { \
1320*4882a593Smuzhiyun 			if (fw_caps & FW_PORT_CAP32_ ## __fw_name) \
1321*4882a593Smuzhiyun 				SET_LMM(__lmm_name); \
1322*4882a593Smuzhiyun 		} while (0)
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	switch (port_type) {
1325*4882a593Smuzhiyun 	case FW_PORT_TYPE_BT_SGMII:
1326*4882a593Smuzhiyun 	case FW_PORT_TYPE_BT_XFI:
1327*4882a593Smuzhiyun 	case FW_PORT_TYPE_BT_XAUI:
1328*4882a593Smuzhiyun 		SET_LMM(TP);
1329*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_100M, 100baseT_Full);
1330*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1331*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1332*4882a593Smuzhiyun 		break;
1333*4882a593Smuzhiyun 
1334*4882a593Smuzhiyun 	case FW_PORT_TYPE_KX4:
1335*4882a593Smuzhiyun 	case FW_PORT_TYPE_KX:
1336*4882a593Smuzhiyun 		SET_LMM(Backplane);
1337*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1338*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
1339*4882a593Smuzhiyun 		break;
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun 	case FW_PORT_TYPE_KR:
1342*4882a593Smuzhiyun 		SET_LMM(Backplane);
1343*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1344*4882a593Smuzhiyun 		break;
1345*4882a593Smuzhiyun 
1346*4882a593Smuzhiyun 	case FW_PORT_TYPE_BP_AP:
1347*4882a593Smuzhiyun 		SET_LMM(Backplane);
1348*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1349*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
1350*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1351*4882a593Smuzhiyun 		break;
1352*4882a593Smuzhiyun 
1353*4882a593Smuzhiyun 	case FW_PORT_TYPE_BP4_AP:
1354*4882a593Smuzhiyun 		SET_LMM(Backplane);
1355*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1356*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseR_FEC);
1357*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1358*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKX4_Full);
1359*4882a593Smuzhiyun 		break;
1360*4882a593Smuzhiyun 
1361*4882a593Smuzhiyun 	case FW_PORT_TYPE_FIBER_XFI:
1362*4882a593Smuzhiyun 	case FW_PORT_TYPE_FIBER_XAUI:
1363*4882a593Smuzhiyun 	case FW_PORT_TYPE_SFP:
1364*4882a593Smuzhiyun 	case FW_PORT_TYPE_QSFP_10G:
1365*4882a593Smuzhiyun 	case FW_PORT_TYPE_QSA:
1366*4882a593Smuzhiyun 		SET_LMM(FIBRE);
1367*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1368*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1369*4882a593Smuzhiyun 		break;
1370*4882a593Smuzhiyun 
1371*4882a593Smuzhiyun 	case FW_PORT_TYPE_BP40_BA:
1372*4882a593Smuzhiyun 	case FW_PORT_TYPE_QSFP:
1373*4882a593Smuzhiyun 		SET_LMM(FIBRE);
1374*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1375*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1376*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
1377*4882a593Smuzhiyun 		break;
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	case FW_PORT_TYPE_CR_QSFP:
1380*4882a593Smuzhiyun 	case FW_PORT_TYPE_SFP28:
1381*4882a593Smuzhiyun 		SET_LMM(FIBRE);
1382*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1383*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseT_Full);
1384*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
1385*4882a593Smuzhiyun 		break;
1386*4882a593Smuzhiyun 
1387*4882a593Smuzhiyun 	case FW_PORT_TYPE_KR_SFP28:
1388*4882a593Smuzhiyun 		SET_LMM(Backplane);
1389*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseT_Full);
1390*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1391*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseKR_Full);
1392*4882a593Smuzhiyun 		break;
1393*4882a593Smuzhiyun 
1394*4882a593Smuzhiyun 	case FW_PORT_TYPE_KR_XLAUI:
1395*4882a593Smuzhiyun 		SET_LMM(Backplane);
1396*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_1G, 1000baseKX_Full);
1397*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1398*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseKR4_Full);
1399*4882a593Smuzhiyun 		break;
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 	case FW_PORT_TYPE_CR2_QSFP:
1402*4882a593Smuzhiyun 		SET_LMM(FIBRE);
1403*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_50G, 50000baseSR2_Full);
1404*4882a593Smuzhiyun 		break;
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	case FW_PORT_TYPE_KR4_100G:
1407*4882a593Smuzhiyun 	case FW_PORT_TYPE_CR4_QSFP:
1408*4882a593Smuzhiyun 		SET_LMM(FIBRE);
1409*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_1G,  1000baseT_Full);
1410*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_10G, 10000baseKR_Full);
1411*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_40G, 40000baseSR4_Full);
1412*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_25G, 25000baseCR_Full);
1413*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_50G, 50000baseCR2_Full);
1414*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(SPEED_100G, 100000baseCR4_Full);
1415*4882a593Smuzhiyun 		break;
1416*4882a593Smuzhiyun 
1417*4882a593Smuzhiyun 	default:
1418*4882a593Smuzhiyun 		break;
1419*4882a593Smuzhiyun 	}
1420*4882a593Smuzhiyun 
1421*4882a593Smuzhiyun 	if (fw_caps & FW_PORT_CAP32_FEC_V(FW_PORT_CAP32_FEC_M)) {
1422*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(FEC_RS, FEC_RS);
1423*4882a593Smuzhiyun 		FW_CAPS_TO_LMM(FEC_BASER_RS, FEC_BASER);
1424*4882a593Smuzhiyun 	} else {
1425*4882a593Smuzhiyun 		SET_LMM(FEC_NONE);
1426*4882a593Smuzhiyun 	}
1427*4882a593Smuzhiyun 
1428*4882a593Smuzhiyun 	FW_CAPS_TO_LMM(ANEG, Autoneg);
1429*4882a593Smuzhiyun 	FW_CAPS_TO_LMM(802_3_PAUSE, Pause);
1430*4882a593Smuzhiyun 	FW_CAPS_TO_LMM(802_3_ASM_DIR, Asym_Pause);
1431*4882a593Smuzhiyun 
1432*4882a593Smuzhiyun 	#undef FW_CAPS_TO_LMM
1433*4882a593Smuzhiyun 	#undef SET_LMM
1434*4882a593Smuzhiyun }
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun static int cxgb4vf_get_link_ksettings(struct net_device *dev,
1437*4882a593Smuzhiyun 				  struct ethtool_link_ksettings *link_ksettings)
1438*4882a593Smuzhiyun {
1439*4882a593Smuzhiyun 	struct port_info *pi = netdev_priv(dev);
1440*4882a593Smuzhiyun 	struct ethtool_link_settings *base = &link_ksettings->base;
1441*4882a593Smuzhiyun 
1442*4882a593Smuzhiyun 	/* For now, the Firmware doesn't send up Port State changes
1443*4882a593Smuzhiyun 	 * when the Virtual Interface attached to the Port is down.  So
1444*4882a593Smuzhiyun 	 * if it's down, poll for any changes that may have occurred.
1445*4882a593Smuzhiyun 	 */
1446*4882a593Smuzhiyun 	if (!netif_running(dev))
1447*4882a593Smuzhiyun 		(void)t4vf_update_port_info(pi);
1448*4882a593Smuzhiyun 
1449*4882a593Smuzhiyun 	ethtool_link_ksettings_zero_link_mode(link_ksettings, supported);
1450*4882a593Smuzhiyun 	ethtool_link_ksettings_zero_link_mode(link_ksettings, advertising);
1451*4882a593Smuzhiyun 	ethtool_link_ksettings_zero_link_mode(link_ksettings, lp_advertising);
1452*4882a593Smuzhiyun 
1453*4882a593Smuzhiyun 	base->port = from_fw_port_mod_type(pi->port_type, pi->mod_type);
1454*4882a593Smuzhiyun 
1455*4882a593Smuzhiyun 	if (pi->mdio_addr >= 0) {
1456*4882a593Smuzhiyun 		base->phy_address = pi->mdio_addr;
1457*4882a593Smuzhiyun 		base->mdio_support = (pi->port_type == FW_PORT_TYPE_BT_SGMII
1458*4882a593Smuzhiyun 				      ? ETH_MDIO_SUPPORTS_C22
1459*4882a593Smuzhiyun 				      : ETH_MDIO_SUPPORTS_C45);
1460*4882a593Smuzhiyun 	} else {
1461*4882a593Smuzhiyun 		base->phy_address = 255;
1462*4882a593Smuzhiyun 		base->mdio_support = 0;
1463*4882a593Smuzhiyun 	}
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.pcaps,
1466*4882a593Smuzhiyun 		       link_ksettings->link_modes.supported);
1467*4882a593Smuzhiyun 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.acaps,
1468*4882a593Smuzhiyun 		       link_ksettings->link_modes.advertising);
1469*4882a593Smuzhiyun 	fw_caps_to_lmm(pi->port_type, pi->link_cfg.lpacaps,
1470*4882a593Smuzhiyun 		       link_ksettings->link_modes.lp_advertising);
1471*4882a593Smuzhiyun 
1472*4882a593Smuzhiyun 	if (netif_carrier_ok(dev)) {
1473*4882a593Smuzhiyun 		base->speed = pi->link_cfg.speed;
1474*4882a593Smuzhiyun 		base->duplex = DUPLEX_FULL;
1475*4882a593Smuzhiyun 	} else {
1476*4882a593Smuzhiyun 		base->speed = SPEED_UNKNOWN;
1477*4882a593Smuzhiyun 		base->duplex = DUPLEX_UNKNOWN;
1478*4882a593Smuzhiyun 	}
1479*4882a593Smuzhiyun 
1480*4882a593Smuzhiyun 	base->autoneg = pi->link_cfg.autoneg;
1481*4882a593Smuzhiyun 	if (pi->link_cfg.pcaps & FW_PORT_CAP32_ANEG)
1482*4882a593Smuzhiyun 		ethtool_link_ksettings_add_link_mode(link_ksettings,
1483*4882a593Smuzhiyun 						     supported, Autoneg);
1484*4882a593Smuzhiyun 	if (pi->link_cfg.autoneg)
1485*4882a593Smuzhiyun 		ethtool_link_ksettings_add_link_mode(link_ksettings,
1486*4882a593Smuzhiyun 						     advertising, Autoneg);
1487*4882a593Smuzhiyun 
1488*4882a593Smuzhiyun 	return 0;
1489*4882a593Smuzhiyun }
1490*4882a593Smuzhiyun 
1491*4882a593Smuzhiyun /* Translate the Firmware FEC value into the ethtool value. */
1492*4882a593Smuzhiyun static inline unsigned int fwcap_to_eth_fec(unsigned int fw_fec)
1493*4882a593Smuzhiyun {
1494*4882a593Smuzhiyun 	unsigned int eth_fec = 0;
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	if (fw_fec & FW_PORT_CAP32_FEC_RS)
1497*4882a593Smuzhiyun 		eth_fec |= ETHTOOL_FEC_RS;
1498*4882a593Smuzhiyun 	if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
1499*4882a593Smuzhiyun 		eth_fec |= ETHTOOL_FEC_BASER;
1500*4882a593Smuzhiyun 
1501*4882a593Smuzhiyun 	/* if nothing is set, then FEC is off */
1502*4882a593Smuzhiyun 	if (!eth_fec)
1503*4882a593Smuzhiyun 		eth_fec = ETHTOOL_FEC_OFF;
1504*4882a593Smuzhiyun 
1505*4882a593Smuzhiyun 	return eth_fec;
1506*4882a593Smuzhiyun }
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun /* Translate Common Code FEC value into ethtool value. */
1509*4882a593Smuzhiyun static inline unsigned int cc_to_eth_fec(unsigned int cc_fec)
1510*4882a593Smuzhiyun {
1511*4882a593Smuzhiyun 	unsigned int eth_fec = 0;
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun 	if (cc_fec & FEC_AUTO)
1514*4882a593Smuzhiyun 		eth_fec |= ETHTOOL_FEC_AUTO;
1515*4882a593Smuzhiyun 	if (cc_fec & FEC_RS)
1516*4882a593Smuzhiyun 		eth_fec |= ETHTOOL_FEC_RS;
1517*4882a593Smuzhiyun 	if (cc_fec & FEC_BASER_RS)
1518*4882a593Smuzhiyun 		eth_fec |= ETHTOOL_FEC_BASER;
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 	/* if nothing is set, then FEC is off */
1521*4882a593Smuzhiyun 	if (!eth_fec)
1522*4882a593Smuzhiyun 		eth_fec = ETHTOOL_FEC_OFF;
1523*4882a593Smuzhiyun 
1524*4882a593Smuzhiyun 	return eth_fec;
1525*4882a593Smuzhiyun }
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun static int cxgb4vf_get_fecparam(struct net_device *dev,
1528*4882a593Smuzhiyun 				struct ethtool_fecparam *fec)
1529*4882a593Smuzhiyun {
1530*4882a593Smuzhiyun 	const struct port_info *pi = netdev_priv(dev);
1531*4882a593Smuzhiyun 	const struct link_config *lc = &pi->link_cfg;
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	/* Translate the Firmware FEC Support into the ethtool value.  We
1534*4882a593Smuzhiyun 	 * always support IEEE 802.3 "automatic" selection of Link FEC type if
1535*4882a593Smuzhiyun 	 * any FEC is supported.
1536*4882a593Smuzhiyun 	 */
1537*4882a593Smuzhiyun 	fec->fec = fwcap_to_eth_fec(lc->pcaps);
1538*4882a593Smuzhiyun 	if (fec->fec != ETHTOOL_FEC_OFF)
1539*4882a593Smuzhiyun 		fec->fec |= ETHTOOL_FEC_AUTO;
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	/* Translate the current internal FEC parameters into the
1542*4882a593Smuzhiyun 	 * ethtool values.
1543*4882a593Smuzhiyun 	 */
1544*4882a593Smuzhiyun 	fec->active_fec = cc_to_eth_fec(lc->fec);
1545*4882a593Smuzhiyun 	return 0;
1546*4882a593Smuzhiyun }
1547*4882a593Smuzhiyun 
1548*4882a593Smuzhiyun /*
1549*4882a593Smuzhiyun  * Return our driver information.
1550*4882a593Smuzhiyun  */
1551*4882a593Smuzhiyun static void cxgb4vf_get_drvinfo(struct net_device *dev,
1552*4882a593Smuzhiyun 				struct ethtool_drvinfo *drvinfo)
1553*4882a593Smuzhiyun {
1554*4882a593Smuzhiyun 	struct adapter *adapter = netdev2adap(dev);
1555*4882a593Smuzhiyun 
1556*4882a593Smuzhiyun 	strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
1557*4882a593Smuzhiyun 	strlcpy(drvinfo->bus_info, pci_name(to_pci_dev(dev->dev.parent)),
1558*4882a593Smuzhiyun 		sizeof(drvinfo->bus_info));
1559*4882a593Smuzhiyun 	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1560*4882a593Smuzhiyun 		 "%u.%u.%u.%u, TP %u.%u.%u.%u",
1561*4882a593Smuzhiyun 		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.fwrev),
1562*4882a593Smuzhiyun 		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.fwrev),
1563*4882a593Smuzhiyun 		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.fwrev),
1564*4882a593Smuzhiyun 		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.fwrev),
1565*4882a593Smuzhiyun 		 FW_HDR_FW_VER_MAJOR_G(adapter->params.dev.tprev),
1566*4882a593Smuzhiyun 		 FW_HDR_FW_VER_MINOR_G(adapter->params.dev.tprev),
1567*4882a593Smuzhiyun 		 FW_HDR_FW_VER_MICRO_G(adapter->params.dev.tprev),
1568*4882a593Smuzhiyun 		 FW_HDR_FW_VER_BUILD_G(adapter->params.dev.tprev));
1569*4882a593Smuzhiyun }
1570*4882a593Smuzhiyun 
1571*4882a593Smuzhiyun /*
1572*4882a593Smuzhiyun  * Return current adapter message level.
1573*4882a593Smuzhiyun  */
1574*4882a593Smuzhiyun static u32 cxgb4vf_get_msglevel(struct net_device *dev)
1575*4882a593Smuzhiyun {
1576*4882a593Smuzhiyun 	return netdev2adap(dev)->msg_enable;
1577*4882a593Smuzhiyun }
1578*4882a593Smuzhiyun 
1579*4882a593Smuzhiyun /*
1580*4882a593Smuzhiyun  * Set current adapter message level.
1581*4882a593Smuzhiyun  */
1582*4882a593Smuzhiyun static void cxgb4vf_set_msglevel(struct net_device *dev, u32 msglevel)
1583*4882a593Smuzhiyun {
1584*4882a593Smuzhiyun 	netdev2adap(dev)->msg_enable = msglevel;
1585*4882a593Smuzhiyun }
1586*4882a593Smuzhiyun 
1587*4882a593Smuzhiyun /*
1588*4882a593Smuzhiyun  * Return the device's current Queue Set ring size parameters along with the
1589*4882a593Smuzhiyun  * allowed maximum values.  Since ethtool doesn't understand the concept of
1590*4882a593Smuzhiyun  * multi-queue devices, we just return the current values associated with the
1591*4882a593Smuzhiyun  * first Queue Set.
1592*4882a593Smuzhiyun  */
1593*4882a593Smuzhiyun static void cxgb4vf_get_ringparam(struct net_device *dev,
1594*4882a593Smuzhiyun 				  struct ethtool_ringparam *rp)
1595*4882a593Smuzhiyun {
1596*4882a593Smuzhiyun 	const struct port_info *pi = netdev_priv(dev);
1597*4882a593Smuzhiyun 	const struct sge *s = &pi->adapter->sge;
1598*4882a593Smuzhiyun 
1599*4882a593Smuzhiyun 	rp->rx_max_pending = MAX_RX_BUFFERS;
1600*4882a593Smuzhiyun 	rp->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1601*4882a593Smuzhiyun 	rp->rx_jumbo_max_pending = 0;
1602*4882a593Smuzhiyun 	rp->tx_max_pending = MAX_TXQ_ENTRIES;
1603*4882a593Smuzhiyun 
1604*4882a593Smuzhiyun 	rp->rx_pending = s->ethrxq[pi->first_qset].fl.size - MIN_FL_RESID;
1605*4882a593Smuzhiyun 	rp->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1606*4882a593Smuzhiyun 	rp->rx_jumbo_pending = 0;
1607*4882a593Smuzhiyun 	rp->tx_pending = s->ethtxq[pi->first_qset].q.size;
1608*4882a593Smuzhiyun }
1609*4882a593Smuzhiyun 
1610*4882a593Smuzhiyun /*
1611*4882a593Smuzhiyun  * Set the Queue Set ring size parameters for the device.  Again, since
1612*4882a593Smuzhiyun  * ethtool doesn't allow for the concept of multiple queues per device, we'll
1613*4882a593Smuzhiyun  * apply these new values across all of the Queue Sets associated with the
1614*4882a593Smuzhiyun  * device -- after vetting them of course!
1615*4882a593Smuzhiyun  */
1616*4882a593Smuzhiyun static int cxgb4vf_set_ringparam(struct net_device *dev,
1617*4882a593Smuzhiyun 				 struct ethtool_ringparam *rp)
1618*4882a593Smuzhiyun {
1619*4882a593Smuzhiyun 	const struct port_info *pi = netdev_priv(dev);
1620*4882a593Smuzhiyun 	struct adapter *adapter = pi->adapter;
1621*4882a593Smuzhiyun 	struct sge *s = &adapter->sge;
1622*4882a593Smuzhiyun 	int qs;
1623*4882a593Smuzhiyun 
1624*4882a593Smuzhiyun 	if (rp->rx_pending > MAX_RX_BUFFERS ||
1625*4882a593Smuzhiyun 	    rp->rx_jumbo_pending ||
1626*4882a593Smuzhiyun 	    rp->tx_pending > MAX_TXQ_ENTRIES ||
1627*4882a593Smuzhiyun 	    rp->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1628*4882a593Smuzhiyun 	    rp->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1629*4882a593Smuzhiyun 	    rp->rx_pending < MIN_FL_ENTRIES ||
1630*4882a593Smuzhiyun 	    rp->tx_pending < MIN_TXQ_ENTRIES)
1631*4882a593Smuzhiyun 		return -EINVAL;
1632*4882a593Smuzhiyun 
1633*4882a593Smuzhiyun 	if (adapter->flags & CXGB4VF_FULL_INIT_DONE)
1634*4882a593Smuzhiyun 		return -EBUSY;
1635*4882a593Smuzhiyun 
1636*4882a593Smuzhiyun 	for (qs = pi->first_qset; qs < pi->first_qset + pi->nqsets; qs++) {
1637*4882a593Smuzhiyun 		s->ethrxq[qs].fl.size = rp->rx_pending + MIN_FL_RESID;
1638*4882a593Smuzhiyun 		s->ethrxq[qs].rspq.size = rp->rx_mini_pending;
1639*4882a593Smuzhiyun 		s->ethtxq[qs].q.size = rp->tx_pending;
1640*4882a593Smuzhiyun 	}
1641*4882a593Smuzhiyun 	return 0;
1642*4882a593Smuzhiyun }
1643*4882a593Smuzhiyun 
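/*
 * Example (illustrative, placeholder interface and sizes):
 *
 *	ethtool -G ethX rx 1024 rx-mini 512 tx 1024
 *
 * arrives here as rp->rx_pending = 1024, rp->rx_mini_pending = 512 and
 * rp->tx_pending = 1024.  The free list is sized rx_pending + MIN_FL_RESID
 * internally, which mirrors the MIN_FL_RESID subtraction reported by
 * cxgb4vf_get_ringparam() above.
 */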
1644*4882a593Smuzhiyun /*
1645*4882a593Smuzhiyun  * Return the interrupt holdoff timer and count for the first Queue Set on the
1646*4882a593Smuzhiyun  * device.  Our extension ioctl() (the cxgbtool interface) allows the
1647*4882a593Smuzhiyun  * interrupt holdoff timer to be read on all of the device's Queue Sets.
1648*4882a593Smuzhiyun  */
1649*4882a593Smuzhiyun static int cxgb4vf_get_coalesce(struct net_device *dev,
1650*4882a593Smuzhiyun 				struct ethtool_coalesce *coalesce)
1651*4882a593Smuzhiyun {
1652*4882a593Smuzhiyun 	const struct port_info *pi = netdev_priv(dev);
1653*4882a593Smuzhiyun 	const struct adapter *adapter = pi->adapter;
1654*4882a593Smuzhiyun 	const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;
1655*4882a593Smuzhiyun 
1656*4882a593Smuzhiyun 	coalesce->rx_coalesce_usecs = qtimer_val(adapter, rspq);
1657*4882a593Smuzhiyun 	coalesce->rx_max_coalesced_frames =
1658*4882a593Smuzhiyun 		((rspq->intr_params & QINTR_CNT_EN_F)
1659*4882a593Smuzhiyun 		 ? adapter->sge.counter_val[rspq->pktcnt_idx]
1660*4882a593Smuzhiyun 		 : 0);
1661*4882a593Smuzhiyun 	return 0;
1662*4882a593Smuzhiyun }
1663*4882a593Smuzhiyun 
1664*4882a593Smuzhiyun /*
1665*4882a593Smuzhiyun  * Set the RX interrupt holdoff timer and count for the first Queue Set on the
1666*4882a593Smuzhiyun  * interface.  Our extension ioctl() (the cxgbtool interface) allows us to set
1667*4882a593Smuzhiyun  * the interrupt holdoff timer on any of the device's Queue Sets.
1668*4882a593Smuzhiyun  */
1669*4882a593Smuzhiyun static int cxgb4vf_set_coalesce(struct net_device *dev,
1670*4882a593Smuzhiyun 				struct ethtool_coalesce *coalesce)
1671*4882a593Smuzhiyun {
1672*4882a593Smuzhiyun 	const struct port_info *pi = netdev_priv(dev);
1673*4882a593Smuzhiyun 	struct adapter *adapter = pi->adapter;
1674*4882a593Smuzhiyun 
1675*4882a593Smuzhiyun 	return set_rxq_intr_params(adapter,
1676*4882a593Smuzhiyun 				   &adapter->sge.ethrxq[pi->first_qset].rspq,
1677*4882a593Smuzhiyun 				   coalesce->rx_coalesce_usecs,
1678*4882a593Smuzhiyun 				   coalesce->rx_max_coalesced_frames);
1679*4882a593Smuzhiyun }
1680*4882a593Smuzhiyun 
1681*4882a593Smuzhiyun /*
1682*4882a593Smuzhiyun  * Report current port link pause parameter settings.
1683*4882a593Smuzhiyun  */
1684*4882a593Smuzhiyun static void cxgb4vf_get_pauseparam(struct net_device *dev,
1685*4882a593Smuzhiyun 				   struct ethtool_pauseparam *pauseparam)
1686*4882a593Smuzhiyun {
1687*4882a593Smuzhiyun 	struct port_info *pi = netdev_priv(dev);
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun 	pauseparam->autoneg = (pi->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1690*4882a593Smuzhiyun 	pauseparam->rx_pause = (pi->link_cfg.advertised_fc & PAUSE_RX) != 0;
1691*4882a593Smuzhiyun 	pauseparam->tx_pause = (pi->link_cfg.advertised_fc & PAUSE_TX) != 0;
1692*4882a593Smuzhiyun }
1693*4882a593Smuzhiyun 
1694*4882a593Smuzhiyun /*
1695*4882a593Smuzhiyun  * Identify the port by blinking the port's LED.
1696*4882a593Smuzhiyun  */
1697*4882a593Smuzhiyun static int cxgb4vf_phys_id(struct net_device *dev,
1698*4882a593Smuzhiyun 			   enum ethtool_phys_id_state state)
1699*4882a593Smuzhiyun {
1700*4882a593Smuzhiyun 	unsigned int val;
1701*4882a593Smuzhiyun 	struct port_info *pi = netdev_priv(dev);
1702*4882a593Smuzhiyun 
1703*4882a593Smuzhiyun 	if (state == ETHTOOL_ID_ACTIVE)
1704*4882a593Smuzhiyun 		val = 0xffff;
1705*4882a593Smuzhiyun 	else if (state == ETHTOOL_ID_INACTIVE)
1706*4882a593Smuzhiyun 		val = 0;
1707*4882a593Smuzhiyun 	else
1708*4882a593Smuzhiyun 		return -EINVAL;
1709*4882a593Smuzhiyun 
1710*4882a593Smuzhiyun 	return t4vf_identify_port(pi->adapter, pi->viid, val);
1711*4882a593Smuzhiyun }
1712*4882a593Smuzhiyun 
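/*
 * Example (illustrative): "ethtool -p ethX 5" causes the ethtool core to
 * call this handler with ETHTOOL_ID_ACTIVE and, once the requested five
 * seconds have elapsed, to call it again with ETHTOOL_ID_INACTIVE to stop
 * the blinking.  "ethX" is a placeholder interface name.
 */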
1713*4882a593Smuzhiyun /*
1714*4882a593Smuzhiyun  * Port stats maintained per queue of the port.
1715*4882a593Smuzhiyun  */
1716*4882a593Smuzhiyun struct queue_port_stats {
1717*4882a593Smuzhiyun 	u64 tso;
1718*4882a593Smuzhiyun 	u64 tx_csum;
1719*4882a593Smuzhiyun 	u64 rx_csum;
1720*4882a593Smuzhiyun 	u64 vlan_ex;
1721*4882a593Smuzhiyun 	u64 vlan_ins;
1722*4882a593Smuzhiyun 	u64 lro_pkts;
1723*4882a593Smuzhiyun 	u64 lro_merged;
1724*4882a593Smuzhiyun };
1725*4882a593Smuzhiyun 
1726*4882a593Smuzhiyun /*
1727*4882a593Smuzhiyun  * Strings for the ETH_SS_STATS statistics set ("ethtool -S").  Note that
1728*4882a593Smuzhiyun  * these need to match the order of statistics returned by
1729*4882a593Smuzhiyun  * t4vf_get_port_stats().
1730*4882a593Smuzhiyun  */
1731*4882a593Smuzhiyun static const char stats_strings[][ETH_GSTRING_LEN] = {
1732*4882a593Smuzhiyun 	/*
1733*4882a593Smuzhiyun 	 * These must match the layout of the t4vf_port_stats structure.
1734*4882a593Smuzhiyun 	 */
1735*4882a593Smuzhiyun 	"TxBroadcastBytes  ",
1736*4882a593Smuzhiyun 	"TxBroadcastFrames ",
1737*4882a593Smuzhiyun 	"TxMulticastBytes  ",
1738*4882a593Smuzhiyun 	"TxMulticastFrames ",
1739*4882a593Smuzhiyun 	"TxUnicastBytes    ",
1740*4882a593Smuzhiyun 	"TxUnicastFrames   ",
1741*4882a593Smuzhiyun 	"TxDroppedFrames   ",
1742*4882a593Smuzhiyun 	"TxOffloadBytes    ",
1743*4882a593Smuzhiyun 	"TxOffloadFrames   ",
1744*4882a593Smuzhiyun 	"RxBroadcastBytes  ",
1745*4882a593Smuzhiyun 	"RxBroadcastFrames ",
1746*4882a593Smuzhiyun 	"RxMulticastBytes  ",
1747*4882a593Smuzhiyun 	"RxMulticastFrames ",
1748*4882a593Smuzhiyun 	"RxUnicastBytes    ",
1749*4882a593Smuzhiyun 	"RxUnicastFrames   ",
1750*4882a593Smuzhiyun 	"RxErrorFrames     ",
1751*4882a593Smuzhiyun 
1752*4882a593Smuzhiyun 	/*
1753*4882a593Smuzhiyun 	 * These are accumulated per-queue statistics and must match the
1754*4882a593Smuzhiyun 	 * order of the fields in the queue_port_stats structure.
1755*4882a593Smuzhiyun 	 */
1756*4882a593Smuzhiyun 	"TSO               ",
1757*4882a593Smuzhiyun 	"TxCsumOffload     ",
1758*4882a593Smuzhiyun 	"RxCsumGood        ",
1759*4882a593Smuzhiyun 	"VLANextractions   ",
1760*4882a593Smuzhiyun 	"VLANinsertions    ",
1761*4882a593Smuzhiyun 	"GROPackets        ",
1762*4882a593Smuzhiyun 	"GROMerged         ",
1763*4882a593Smuzhiyun };
1764*4882a593Smuzhiyun 
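/*
 * A compile-time check along these lines could enforce the layout
 * assumptions noted above (an illustrative sketch, not present in the
 * driver; it assumes both structures consist purely of u64 counters):
 *
 *	BUILD_BUG_ON(ARRAY_SIZE(stats_strings) !=
 *		     (sizeof(struct t4vf_port_stats) +
 *		      sizeof(struct queue_port_stats)) / sizeof(u64));
 */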
1765*4882a593Smuzhiyun /*
1766*4882a593Smuzhiyun  * Return the number of statistics in the specified statistics set.
1767*4882a593Smuzhiyun  */
1768*4882a593Smuzhiyun static int cxgb4vf_get_sset_count(struct net_device *dev, int sset)
1769*4882a593Smuzhiyun {
1770*4882a593Smuzhiyun 	switch (sset) {
1771*4882a593Smuzhiyun 	case ETH_SS_STATS:
1772*4882a593Smuzhiyun 		return ARRAY_SIZE(stats_strings);
1773*4882a593Smuzhiyun 	default:
1774*4882a593Smuzhiyun 		return -EOPNOTSUPP;
1775*4882a593Smuzhiyun 	}
1776*4882a593Smuzhiyun 	/*NOTREACHED*/
1777*4882a593Smuzhiyun }
1778*4882a593Smuzhiyun 
1779*4882a593Smuzhiyun /*
1780*4882a593Smuzhiyun  * Return the strings for the specified statistics set.
1781*4882a593Smuzhiyun  */
1782*4882a593Smuzhiyun static void cxgb4vf_get_strings(struct net_device *dev,
1783*4882a593Smuzhiyun 				u32 sset,
1784*4882a593Smuzhiyun 				u8 *data)
1785*4882a593Smuzhiyun {
1786*4882a593Smuzhiyun 	switch (sset) {
1787*4882a593Smuzhiyun 	case ETH_SS_STATS:
1788*4882a593Smuzhiyun 		memcpy(data, stats_strings, sizeof(stats_strings));
1789*4882a593Smuzhiyun 		break;
1790*4882a593Smuzhiyun 	}
1791*4882a593Smuzhiyun }
1792*4882a593Smuzhiyun 
1793*4882a593Smuzhiyun /*
1794*4882a593Smuzhiyun  * Small utility routine to accumulate queue statistics across the queues of
1795*4882a593Smuzhiyun  * a "port".
1796*4882a593Smuzhiyun  */
1797*4882a593Smuzhiyun static void collect_sge_port_stats(const struct adapter *adapter,
1798*4882a593Smuzhiyun 				   const struct port_info *pi,
1799*4882a593Smuzhiyun 				   struct queue_port_stats *stats)
1800*4882a593Smuzhiyun {
1801*4882a593Smuzhiyun 	const struct sge_eth_txq *txq = &adapter->sge.ethtxq[pi->first_qset];
1802*4882a593Smuzhiyun 	const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[pi->first_qset];
1803*4882a593Smuzhiyun 	int qs;
1804*4882a593Smuzhiyun 
1805*4882a593Smuzhiyun 	memset(stats, 0, sizeof(*stats));
1806*4882a593Smuzhiyun 	for (qs = 0; qs < pi->nqsets; qs++, rxq++, txq++) {
1807*4882a593Smuzhiyun 		stats->tso += txq->tso;
1808*4882a593Smuzhiyun 		stats->tx_csum += txq->tx_cso;
1809*4882a593Smuzhiyun 		stats->rx_csum += rxq->stats.rx_cso;
1810*4882a593Smuzhiyun 		stats->vlan_ex += rxq->stats.vlan_ex;
1811*4882a593Smuzhiyun 		stats->vlan_ins += txq->vlan_ins;
1812*4882a593Smuzhiyun 		stats->lro_pkts += rxq->stats.lro_pkts;
1813*4882a593Smuzhiyun 		stats->lro_merged += rxq->stats.lro_merged;
1814*4882a593Smuzhiyun 	}
1815*4882a593Smuzhiyun }
1816*4882a593Smuzhiyun 
1817*4882a593Smuzhiyun /*
1818*4882a593Smuzhiyun  * Return the ETH_SS_STATS statistics set.
1819*4882a593Smuzhiyun  */
1820*4882a593Smuzhiyun static void cxgb4vf_get_ethtool_stats(struct net_device *dev,
1821*4882a593Smuzhiyun 				      struct ethtool_stats *stats,
1822*4882a593Smuzhiyun 				      u64 *data)
1823*4882a593Smuzhiyun {
1824*4882a593Smuzhiyun 	struct port_info *pi = netdev2pinfo(dev);
1825*4882a593Smuzhiyun 	struct adapter *adapter = pi->adapter;
1826*4882a593Smuzhiyun 	int err = t4vf_get_port_stats(adapter, pi->pidx,
1827*4882a593Smuzhiyun 				      (struct t4vf_port_stats *)data);
1828*4882a593Smuzhiyun 	if (err)
1829*4882a593Smuzhiyun 		memset(data, 0, sizeof(struct t4vf_port_stats));
1830*4882a593Smuzhiyun 
1831*4882a593Smuzhiyun 	data += sizeof(struct t4vf_port_stats) / sizeof(u64);
1832*4882a593Smuzhiyun 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1833*4882a593Smuzhiyun }
1834*4882a593Smuzhiyun 
1835*4882a593Smuzhiyun /*
1836*4882a593Smuzhiyun  * Return the size of our register map.
1837*4882a593Smuzhiyun  */
1838*4882a593Smuzhiyun static int cxgb4vf_get_regs_len(struct net_device *dev)
1839*4882a593Smuzhiyun {
1840*4882a593Smuzhiyun 	return T4VF_REGMAP_SIZE;
1841*4882a593Smuzhiyun }
1842*4882a593Smuzhiyun 
1843*4882a593Smuzhiyun /*
1844*4882a593Smuzhiyun  * Dump a block of registers, start to end inclusive, into a buffer.
1845*4882a593Smuzhiyun  */
1846*4882a593Smuzhiyun static void reg_block_dump(struct adapter *adapter, void *regbuf,
1847*4882a593Smuzhiyun 			   unsigned int start, unsigned int end)
1848*4882a593Smuzhiyun {
1849*4882a593Smuzhiyun 	u32 *bp = regbuf + start - T4VF_REGMAP_START;
1850*4882a593Smuzhiyun 
1851*4882a593Smuzhiyun 	for ( ; start <= end; start += sizeof(u32)) {
1852*4882a593Smuzhiyun 		/*
1853*4882a593Smuzhiyun 		 * Avoid reading the Mailbox Control register since that
1854*4882a593Smuzhiyun 		 * can trigger a Mailbox Ownership Arbitration cycle and
1855*4882a593Smuzhiyun 		 * interfere with communication with the firmware.
1856*4882a593Smuzhiyun 		 */
1857*4882a593Smuzhiyun 		if (start == T4VF_CIM_BASE_ADDR + CIM_VF_EXT_MAILBOX_CTRL)
1858*4882a593Smuzhiyun 			*bp++ = 0xffff;
1859*4882a593Smuzhiyun 		else
1860*4882a593Smuzhiyun 			*bp++ = t4_read_reg(adapter, start);
1861*4882a593Smuzhiyun 	}
1862*4882a593Smuzhiyun }
1863*4882a593Smuzhiyun 
1864*4882a593Smuzhiyun /*
1865*4882a593Smuzhiyun  * Copy our entire register map into the provided buffer.
1866*4882a593Smuzhiyun  */
1867*4882a593Smuzhiyun static void cxgb4vf_get_regs(struct net_device *dev,
1868*4882a593Smuzhiyun 			     struct ethtool_regs *regs,
1869*4882a593Smuzhiyun 			     void *regbuf)
1870*4882a593Smuzhiyun {
1871*4882a593Smuzhiyun 	struct adapter *adapter = netdev2adap(dev);
1872*4882a593Smuzhiyun 
1873*4882a593Smuzhiyun 	regs->version = mk_adap_vers(adapter);
1874*4882a593Smuzhiyun 
1875*4882a593Smuzhiyun 	/*
1876*4882a593Smuzhiyun 	 * Fill in register buffer with our register map.
1877*4882a593Smuzhiyun 	 */
1878*4882a593Smuzhiyun 	memset(regbuf, 0, T4VF_REGMAP_SIZE);
1879*4882a593Smuzhiyun 
1880*4882a593Smuzhiyun 	reg_block_dump(adapter, regbuf,
1881*4882a593Smuzhiyun 		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_FIRST,
1882*4882a593Smuzhiyun 		       T4VF_SGE_BASE_ADDR + T4VF_MOD_MAP_SGE_LAST);
1883*4882a593Smuzhiyun 	reg_block_dump(adapter, regbuf,
1884*4882a593Smuzhiyun 		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
1885*4882a593Smuzhiyun 		       T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 	/* T5 adds new registers in the PL Register map.
1888*4882a593Smuzhiyun 	 */
1889*4882a593Smuzhiyun 	reg_block_dump(adapter, regbuf,
1890*4882a593Smuzhiyun 		       T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
1891*4882a593Smuzhiyun 		       T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
1892*4882a593Smuzhiyun 		       ? PL_VF_WHOAMI_A : PL_VF_REVISION_A));
1893*4882a593Smuzhiyun 	reg_block_dump(adapter, regbuf,
1894*4882a593Smuzhiyun 		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
1895*4882a593Smuzhiyun 		       T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
1896*4882a593Smuzhiyun 
1897*4882a593Smuzhiyun 	reg_block_dump(adapter, regbuf,
1898*4882a593Smuzhiyun 		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_FIRST,
1899*4882a593Smuzhiyun 		       T4VF_MBDATA_BASE_ADDR + T4VF_MBDATA_LAST);
1900*4882a593Smuzhiyun }
1901*4882a593Smuzhiyun 
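/*
 * Example (illustrative): the map assembled here is what a userspace
 * "ethtool -d ethX" retrieves.  Note that the word at the Mailbox Control
 * offset is reported as 0xffff rather than read from hardware, per the
 * comment in reg_block_dump() above.
 */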
1902*4882a593Smuzhiyun /*
1903*4882a593Smuzhiyun  * Report current Wake On LAN settings.
1904*4882a593Smuzhiyun  */
1905*4882a593Smuzhiyun static void cxgb4vf_get_wol(struct net_device *dev,
1906*4882a593Smuzhiyun 			    struct ethtool_wolinfo *wol)
1907*4882a593Smuzhiyun {
1908*4882a593Smuzhiyun 	wol->supported = 0;
1909*4882a593Smuzhiyun 	wol->wolopts = 0;
1910*4882a593Smuzhiyun 	memset(&wol->sopass, 0, sizeof(wol->sopass));
1911*4882a593Smuzhiyun }
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun /*
1914*4882a593Smuzhiyun  * TCP Segmentation Offload flags which we support.
1915*4882a593Smuzhiyun  */
1916*4882a593Smuzhiyun #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1917*4882a593Smuzhiyun #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
1918*4882a593Smuzhiyun 		   NETIF_F_GRO | NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
1919*4882a593Smuzhiyun 
1920*4882a593Smuzhiyun static const struct ethtool_ops cxgb4vf_ethtool_ops = {
1921*4882a593Smuzhiyun 	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
1922*4882a593Smuzhiyun 				     ETHTOOL_COALESCE_RX_MAX_FRAMES,
1923*4882a593Smuzhiyun 	.get_link_ksettings	= cxgb4vf_get_link_ksettings,
1924*4882a593Smuzhiyun 	.get_fecparam		= cxgb4vf_get_fecparam,
1925*4882a593Smuzhiyun 	.get_drvinfo		= cxgb4vf_get_drvinfo,
1926*4882a593Smuzhiyun 	.get_msglevel		= cxgb4vf_get_msglevel,
1927*4882a593Smuzhiyun 	.set_msglevel		= cxgb4vf_set_msglevel,
1928*4882a593Smuzhiyun 	.get_ringparam		= cxgb4vf_get_ringparam,
1929*4882a593Smuzhiyun 	.set_ringparam		= cxgb4vf_set_ringparam,
1930*4882a593Smuzhiyun 	.get_coalesce		= cxgb4vf_get_coalesce,
1931*4882a593Smuzhiyun 	.set_coalesce		= cxgb4vf_set_coalesce,
1932*4882a593Smuzhiyun 	.get_pauseparam		= cxgb4vf_get_pauseparam,
1933*4882a593Smuzhiyun 	.get_link		= ethtool_op_get_link,
1934*4882a593Smuzhiyun 	.get_strings		= cxgb4vf_get_strings,
1935*4882a593Smuzhiyun 	.set_phys_id		= cxgb4vf_phys_id,
1936*4882a593Smuzhiyun 	.get_sset_count		= cxgb4vf_get_sset_count,
1937*4882a593Smuzhiyun 	.get_ethtool_stats	= cxgb4vf_get_ethtool_stats,
1938*4882a593Smuzhiyun 	.get_regs_len		= cxgb4vf_get_regs_len,
1939*4882a593Smuzhiyun 	.get_regs		= cxgb4vf_get_regs,
1940*4882a593Smuzhiyun 	.get_wol		= cxgb4vf_get_wol,
1941*4882a593Smuzhiyun };
1942*4882a593Smuzhiyun 
1943*4882a593Smuzhiyun /*
1944*4882a593Smuzhiyun  * /sys/kernel/debug/cxgb4vf support code and data.
1945*4882a593Smuzhiyun  * ================================================
1946*4882a593Smuzhiyun  */
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun /*
1949*4882a593Smuzhiyun  * Show Firmware Mailbox Command/Reply Log
1950*4882a593Smuzhiyun  *
1951*4882a593Smuzhiyun  * Note that we don't do any locking when dumping the Firmware Mailbox Log so
1952*4882a593Smuzhiyun  * it's possible that we can catch things during a log update and therefore
1953*4882a593Smuzhiyun  * see partially corrupted log entries.  But it's probably Good Enough(tm).
1954*4882a593Smuzhiyun  * If we ever decide that we want to make sure that we're dumping a coherent
1955*4882a593Smuzhiyun  * log, we'd need to perform locking in the mailbox logging and in
1956*4882a593Smuzhiyun  * mboxlog_open() where we'd need to grab the entire mailbox log in one go
1957*4882a593Smuzhiyun  * like we do for the Firmware Device Log.  But as stated above, meh ...
1958*4882a593Smuzhiyun  */
1959*4882a593Smuzhiyun static int mboxlog_show(struct seq_file *seq, void *v)
1960*4882a593Smuzhiyun {
1961*4882a593Smuzhiyun 	struct adapter *adapter = seq->private;
1962*4882a593Smuzhiyun 	struct mbox_cmd_log *log = adapter->mbox_log;
1963*4882a593Smuzhiyun 	struct mbox_cmd *entry;
1964*4882a593Smuzhiyun 	int entry_idx, i;
1965*4882a593Smuzhiyun 
1966*4882a593Smuzhiyun 	if (v == SEQ_START_TOKEN) {
1967*4882a593Smuzhiyun 		seq_printf(seq,
1968*4882a593Smuzhiyun 			   "%10s  %15s  %5s  %5s  %s\n",
1969*4882a593Smuzhiyun 			   "Seq#", "Tstamp", "Atime", "Etime",
1970*4882a593Smuzhiyun 			   "Command/Reply");
1971*4882a593Smuzhiyun 		return 0;
1972*4882a593Smuzhiyun 	}
1973*4882a593Smuzhiyun 
1974*4882a593Smuzhiyun 	entry_idx = log->cursor + ((uintptr_t)v - 2);
1975*4882a593Smuzhiyun 	if (entry_idx >= log->size)
1976*4882a593Smuzhiyun 		entry_idx -= log->size;
1977*4882a593Smuzhiyun 	entry = mbox_cmd_log_entry(log, entry_idx);
1978*4882a593Smuzhiyun 
1979*4882a593Smuzhiyun 	/* skip over unused entries */
1980*4882a593Smuzhiyun 	if (entry->timestamp == 0)
1981*4882a593Smuzhiyun 		return 0;
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 	seq_printf(seq, "%10u  %15llu  %5d  %5d",
1984*4882a593Smuzhiyun 		   entry->seqno, entry->timestamp,
1985*4882a593Smuzhiyun 		   entry->access, entry->execute);
1986*4882a593Smuzhiyun 	for (i = 0; i < MBOX_LEN / 8; i++) {
1987*4882a593Smuzhiyun 		u64 flit = entry->cmd[i];
1988*4882a593Smuzhiyun 		u32 hi = (u32)(flit >> 32);
1989*4882a593Smuzhiyun 		u32 lo = (u32)flit;
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 		seq_printf(seq, "  %08x %08x", hi, lo);
1992*4882a593Smuzhiyun 	}
1993*4882a593Smuzhiyun 	seq_puts(seq, "\n");
1994*4882a593Smuzhiyun 	return 0;
1995*4882a593Smuzhiyun }
1996*4882a593Smuzhiyun 
1997*4882a593Smuzhiyun static inline void *mboxlog_get_idx(struct seq_file *seq, loff_t pos)
1998*4882a593Smuzhiyun {
1999*4882a593Smuzhiyun 	struct adapter *adapter = seq->private;
2000*4882a593Smuzhiyun 	struct mbox_cmd_log *log = adapter->mbox_log;
2001*4882a593Smuzhiyun 
2002*4882a593Smuzhiyun 	return ((pos <= log->size) ? (void *)(uintptr_t)(pos + 1) : NULL);
2003*4882a593Smuzhiyun }
2004*4882a593Smuzhiyun 
2005*4882a593Smuzhiyun static void *mboxlog_start(struct seq_file *seq, loff_t *pos)
2006*4882a593Smuzhiyun {
2007*4882a593Smuzhiyun 	return *pos ? mboxlog_get_idx(seq, *pos) : SEQ_START_TOKEN;
2008*4882a593Smuzhiyun }
2009*4882a593Smuzhiyun 
2010*4882a593Smuzhiyun static void *mboxlog_next(struct seq_file *seq, void *v, loff_t *pos)
2011*4882a593Smuzhiyun {
2012*4882a593Smuzhiyun 	++*pos;
2013*4882a593Smuzhiyun 	return mboxlog_get_idx(seq, *pos);
2014*4882a593Smuzhiyun }
2015*4882a593Smuzhiyun 
2016*4882a593Smuzhiyun static void mboxlog_stop(struct seq_file *seq, void *v)
2017*4882a593Smuzhiyun {
2018*4882a593Smuzhiyun }
2019*4882a593Smuzhiyun 
2020*4882a593Smuzhiyun static const struct seq_operations mboxlog_sops = {
2021*4882a593Smuzhiyun 	.start = mboxlog_start,
2022*4882a593Smuzhiyun 	.next  = mboxlog_next,
2023*4882a593Smuzhiyun 	.stop  = mboxlog_stop,
2024*4882a593Smuzhiyun 	.show  = mboxlog_show
2025*4882a593Smuzhiyun };
2026*4882a593Smuzhiyun 
2027*4882a593Smuzhiyun DEFINE_SEQ_ATTRIBUTE(mboxlog);
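
/*
 * A note on the iterator encoding used above (informational): the seq_file
 * position is carried directly in the opaque iterator pointer, offset by
 * one so that NULL can still signal end-of-file.  Roughly:
 *
 *	*pos == 0		-> SEQ_START_TOKEN	(prints the header line)
 *	*pos == 1..size		-> (void *)(*pos + 1)	(log entry *pos - 1)
 *
 * which is why mboxlog_show() recovers the entry index as
 * "log->cursor + ((uintptr_t)v - 2)".
 */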
2028*4882a593Smuzhiyun /*
2029*4882a593Smuzhiyun  * Show SGE Queue Set information.  We display QPL Queue Sets per line.
2030*4882a593Smuzhiyun  */
2031*4882a593Smuzhiyun #define QPL	4
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun static int sge_qinfo_show(struct seq_file *seq, void *v)
2034*4882a593Smuzhiyun {
2035*4882a593Smuzhiyun 	struct adapter *adapter = seq->private;
2036*4882a593Smuzhiyun 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
2037*4882a593Smuzhiyun 	int qs, r = (uintptr_t)v - 1;
2038*4882a593Smuzhiyun 
2039*4882a593Smuzhiyun 	if (r)
2040*4882a593Smuzhiyun 		seq_putc(seq, '\n');
2041*4882a593Smuzhiyun 
2042*4882a593Smuzhiyun 	#define S3(fmt_spec, s, v) \
2043*4882a593Smuzhiyun 		do {\
2044*4882a593Smuzhiyun 			seq_printf(seq, "%-12s", s); \
2045*4882a593Smuzhiyun 			for (qs = 0; qs < n; ++qs) \
2046*4882a593Smuzhiyun 				seq_printf(seq, " %16" fmt_spec, v); \
2047*4882a593Smuzhiyun 			seq_putc(seq, '\n'); \
2048*4882a593Smuzhiyun 		} while (0)
2049*4882a593Smuzhiyun 	#define S(s, v)		S3("s", s, v)
2050*4882a593Smuzhiyun 	#define T(s, v)		S3("u", s, txq[qs].v)
2051*4882a593Smuzhiyun 	#define R(s, v)		S3("u", s, rxq[qs].v)
2052*4882a593Smuzhiyun 
2053*4882a593Smuzhiyun 	if (r < eth_entries) {
2054*4882a593Smuzhiyun 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
2055*4882a593Smuzhiyun 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
2056*4882a593Smuzhiyun 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
2057*4882a593Smuzhiyun 
2058*4882a593Smuzhiyun 		S("QType:", "Ethernet");
2059*4882a593Smuzhiyun 		S("Interface:",
2060*4882a593Smuzhiyun 		  (rxq[qs].rspq.netdev
2061*4882a593Smuzhiyun 		   ? rxq[qs].rspq.netdev->name
2062*4882a593Smuzhiyun 		   : "N/A"));
2063*4882a593Smuzhiyun 		S3("d", "Port:",
2064*4882a593Smuzhiyun 		   (rxq[qs].rspq.netdev
2065*4882a593Smuzhiyun 		    ? ((struct port_info *)
2066*4882a593Smuzhiyun 		       netdev_priv(rxq[qs].rspq.netdev))->port_id
2067*4882a593Smuzhiyun 		    : -1));
2068*4882a593Smuzhiyun 		T("TxQ ID:", q.abs_id);
2069*4882a593Smuzhiyun 		T("TxQ size:", q.size);
2070*4882a593Smuzhiyun 		T("TxQ inuse:", q.in_use);
2071*4882a593Smuzhiyun 		T("TxQ PIdx:", q.pidx);
2072*4882a593Smuzhiyun 		T("TxQ CIdx:", q.cidx);
2073*4882a593Smuzhiyun 		R("RspQ ID:", rspq.abs_id);
2074*4882a593Smuzhiyun 		R("RspQ size:", rspq.size);
2075*4882a593Smuzhiyun 		R("RspQE size:", rspq.iqe_len);
2076*4882a593Smuzhiyun 		S3("u", "Intr delay:", qtimer_val(adapter, &rxq[qs].rspq));
2077*4882a593Smuzhiyun 		S3("u", "Intr pktcnt:",
2078*4882a593Smuzhiyun 		   adapter->sge.counter_val[rxq[qs].rspq.pktcnt_idx]);
2079*4882a593Smuzhiyun 		R("RspQ CIdx:", rspq.cidx);
2080*4882a593Smuzhiyun 		R("RspQ Gen:", rspq.gen);
2081*4882a593Smuzhiyun 		R("FL ID:", fl.abs_id);
2082*4882a593Smuzhiyun 		R("FL size:", fl.size - MIN_FL_RESID);
2083*4882a593Smuzhiyun 		R("FL avail:", fl.avail);
2084*4882a593Smuzhiyun 		R("FL PIdx:", fl.pidx);
2085*4882a593Smuzhiyun 		R("FL CIdx:", fl.cidx);
2086*4882a593Smuzhiyun 		return 0;
2087*4882a593Smuzhiyun 	}
2088*4882a593Smuzhiyun 
2089*4882a593Smuzhiyun 	r -= eth_entries;
2090*4882a593Smuzhiyun 	if (r == 0) {
2091*4882a593Smuzhiyun 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun 		seq_printf(seq, "%-12s %16s\n", "QType:", "FW event queue");
2094*4882a593Smuzhiyun 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", evtq->abs_id);
2095*4882a593Smuzhiyun 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
2096*4882a593Smuzhiyun 			   qtimer_val(adapter, evtq));
2097*4882a593Smuzhiyun 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
2098*4882a593Smuzhiyun 			   adapter->sge.counter_val[evtq->pktcnt_idx]);
2099*4882a593Smuzhiyun 		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", evtq->cidx);
2100*4882a593Smuzhiyun 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", evtq->gen);
2101*4882a593Smuzhiyun 	} else if (r == 1) {
2102*4882a593Smuzhiyun 		const struct sge_rspq *intrq = &adapter->sge.intrq;
2103*4882a593Smuzhiyun 
2104*4882a593Smuzhiyun 		seq_printf(seq, "%-12s %16s\n", "QType:", "Interrupt Queue");
2105*4882a593Smuzhiyun 		seq_printf(seq, "%-12s %16u\n", "RspQ ID:", intrq->abs_id);
2106*4882a593Smuzhiyun 		seq_printf(seq, "%-12s %16u\n", "Intr delay:",
2107*4882a593Smuzhiyun 			   qtimer_val(adapter, intrq));
2108*4882a593Smuzhiyun 		seq_printf(seq, "%-12s %16u\n", "Intr pktcnt:",
2109*4882a593Smuzhiyun 			   adapter->sge.counter_val[intrq->pktcnt_idx]);
2110*4882a593Smuzhiyun 		seq_printf(seq, "%-12s %16u\n", "RspQ Cidx:", intrq->cidx);
2111*4882a593Smuzhiyun 		seq_printf(seq, "%-12s %16u\n", "RspQ Gen:", intrq->gen);
2112*4882a593Smuzhiyun 	}
2113*4882a593Smuzhiyun 
2114*4882a593Smuzhiyun 	#undef R
2115*4882a593Smuzhiyun 	#undef T
2116*4882a593Smuzhiyun 	#undef S
2117*4882a593Smuzhiyun 	#undef S3
2118*4882a593Smuzhiyun 
2119*4882a593Smuzhiyun 	return 0;
2120*4882a593Smuzhiyun }
2121*4882a593Smuzhiyun 
2122*4882a593Smuzhiyun /*
2123*4882a593Smuzhiyun  * Return the number of "entries" in our "file".  We group the multi-Queue
2124*4882a593Smuzhiyun  * sections with QPL Queue Sets per "entry".  The sections of the output are:
2125*4882a593Smuzhiyun  *
2126*4882a593Smuzhiyun  *     Ethernet RX/TX Queue Sets
2127*4882a593Smuzhiyun  *     Firmware Event Queue
2128*4882a593Smuzhiyun  *     Forwarded Interrupt Queue (if in MSI mode)
2129*4882a593Smuzhiyun  */
2130*4882a593Smuzhiyun static int sge_queue_entries(const struct adapter *adapter)
2131*4882a593Smuzhiyun {
2132*4882a593Smuzhiyun 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2133*4882a593Smuzhiyun 		((adapter->flags & CXGB4VF_USING_MSI) != 0);
2134*4882a593Smuzhiyun }
2135*4882a593Smuzhiyun 
2136*4882a593Smuzhiyun static void *sge_queue_start(struct seq_file *seq, loff_t *pos)
2137*4882a593Smuzhiyun {
2138*4882a593Smuzhiyun 	int entries = sge_queue_entries(seq->private);
2139*4882a593Smuzhiyun 
2140*4882a593Smuzhiyun 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2141*4882a593Smuzhiyun }
2142*4882a593Smuzhiyun 
2143*4882a593Smuzhiyun static void sge_queue_stop(struct seq_file *seq, void *v)
2144*4882a593Smuzhiyun {
2145*4882a593Smuzhiyun }
2146*4882a593Smuzhiyun 
2147*4882a593Smuzhiyun static void *sge_queue_next(struct seq_file *seq, void *v, loff_t *pos)
2148*4882a593Smuzhiyun {
2149*4882a593Smuzhiyun 	int entries = sge_queue_entries(seq->private);
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun 	++*pos;
2152*4882a593Smuzhiyun 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2153*4882a593Smuzhiyun }
2154*4882a593Smuzhiyun 
2155*4882a593Smuzhiyun static const struct seq_operations sge_qinfo_sops = {
2156*4882a593Smuzhiyun 	.start = sge_queue_start,
2157*4882a593Smuzhiyun 	.next  = sge_queue_next,
2158*4882a593Smuzhiyun 	.stop  = sge_queue_stop,
2159*4882a593Smuzhiyun 	.show  = sge_qinfo_show
2160*4882a593Smuzhiyun };
2161*4882a593Smuzhiyun 
2162*4882a593Smuzhiyun DEFINE_SEQ_ATTRIBUTE(sge_qinfo);
2163*4882a593Smuzhiyun 
2164*4882a593Smuzhiyun /*
2165*4882a593Smuzhiyun  * Show SGE Queue Set statistics.  We display QPL Queue Sets per line.
2166*4882a593Smuzhiyun  */
2167*4882a593Smuzhiyun #define QPL	4
2168*4882a593Smuzhiyun 
2169*4882a593Smuzhiyun static int sge_qstats_show(struct seq_file *seq, void *v)
2170*4882a593Smuzhiyun {
2171*4882a593Smuzhiyun 	struct adapter *adapter = seq->private;
2172*4882a593Smuzhiyun 	int eth_entries = DIV_ROUND_UP(adapter->sge.ethqsets, QPL);
2173*4882a593Smuzhiyun 	int qs, r = (uintptr_t)v - 1;
2174*4882a593Smuzhiyun 
2175*4882a593Smuzhiyun 	if (r)
2176*4882a593Smuzhiyun 		seq_putc(seq, '\n');
2177*4882a593Smuzhiyun 
2178*4882a593Smuzhiyun 	#define S3(fmt, s, v) \
2179*4882a593Smuzhiyun 		do { \
2180*4882a593Smuzhiyun 			seq_printf(seq, "%-16s", s); \
2181*4882a593Smuzhiyun 			for (qs = 0; qs < n; ++qs) \
2182*4882a593Smuzhiyun 				seq_printf(seq, " %8" fmt, v); \
2183*4882a593Smuzhiyun 			seq_putc(seq, '\n'); \
2184*4882a593Smuzhiyun 		} while (0)
2185*4882a593Smuzhiyun 	#define S(s, v)		S3("s", s, v)
2186*4882a593Smuzhiyun 
2187*4882a593Smuzhiyun 	#define T3(fmt, s, v)	S3(fmt, s, txq[qs].v)
2188*4882a593Smuzhiyun 	#define T(s, v)		T3("lu", s, v)
2189*4882a593Smuzhiyun 
2190*4882a593Smuzhiyun 	#define R3(fmt, s, v)	S3(fmt, s, rxq[qs].v)
2191*4882a593Smuzhiyun 	#define R(s, v)		R3("lu", s, v)
2192*4882a593Smuzhiyun 
2193*4882a593Smuzhiyun 	if (r < eth_entries) {
2194*4882a593Smuzhiyun 		const struct sge_eth_rxq *rxq = &adapter->sge.ethrxq[r * QPL];
2195*4882a593Smuzhiyun 		const struct sge_eth_txq *txq = &adapter->sge.ethtxq[r * QPL];
2196*4882a593Smuzhiyun 		int n = min(QPL, adapter->sge.ethqsets - QPL * r);
2197*4882a593Smuzhiyun 
2198*4882a593Smuzhiyun 		S("QType:", "Ethernet");
2199*4882a593Smuzhiyun 		S("Interface:",
2200*4882a593Smuzhiyun 		  (rxq[qs].rspq.netdev
2201*4882a593Smuzhiyun 		   ? rxq[qs].rspq.netdev->name
2202*4882a593Smuzhiyun 		   : "N/A"));
2203*4882a593Smuzhiyun 		R3("u", "RspQNullInts:", rspq.unhandled_irqs);
2204*4882a593Smuzhiyun 		R("RxPackets:", stats.pkts);
2205*4882a593Smuzhiyun 		R("RxCSO:", stats.rx_cso);
2206*4882a593Smuzhiyun 		R("VLANxtract:", stats.vlan_ex);
2207*4882a593Smuzhiyun 		R("LROmerged:", stats.lro_merged);
2208*4882a593Smuzhiyun 		R("LROpackets:", stats.lro_pkts);
2209*4882a593Smuzhiyun 		R("RxDrops:", stats.rx_drops);
2210*4882a593Smuzhiyun 		T("TSO:", tso);
2211*4882a593Smuzhiyun 		T("TxCSO:", tx_cso);
2212*4882a593Smuzhiyun 		T("VLANins:", vlan_ins);
2213*4882a593Smuzhiyun 		T("TxQFull:", q.stops);
2214*4882a593Smuzhiyun 		T("TxQRestarts:", q.restarts);
2215*4882a593Smuzhiyun 		T("TxMapErr:", mapping_err);
2216*4882a593Smuzhiyun 		R("FLAllocErr:", fl.alloc_failed);
2217*4882a593Smuzhiyun 		R("FLLrgAlcErr:", fl.large_alloc_failed);
2218*4882a593Smuzhiyun 		R("FLStarving:", fl.starving);
2219*4882a593Smuzhiyun 		return 0;
2220*4882a593Smuzhiyun 	}
2221*4882a593Smuzhiyun 
2222*4882a593Smuzhiyun 	r -= eth_entries;
2223*4882a593Smuzhiyun 	if (r == 0) {
2224*4882a593Smuzhiyun 		const struct sge_rspq *evtq = &adapter->sge.fw_evtq;
2225*4882a593Smuzhiyun 
2226*4882a593Smuzhiyun 		seq_printf(seq, "%-8s %16s\n", "QType:", "FW event queue");
2227*4882a593Smuzhiyun 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2228*4882a593Smuzhiyun 			   evtq->unhandled_irqs);
2229*4882a593Smuzhiyun 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", evtq->cidx);
2230*4882a593Smuzhiyun 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", evtq->gen);
2231*4882a593Smuzhiyun 	} else if (r == 1) {
2232*4882a593Smuzhiyun 		const struct sge_rspq *intrq = &adapter->sge.intrq;
2233*4882a593Smuzhiyun 
2234*4882a593Smuzhiyun 		seq_printf(seq, "%-8s %16s\n", "QType:", "Interrupt Queue");
2235*4882a593Smuzhiyun 		seq_printf(seq, "%-16s %8u\n", "RspQNullInts:",
2236*4882a593Smuzhiyun 			   intrq->unhandled_irqs);
2237*4882a593Smuzhiyun 		seq_printf(seq, "%-16s %8u\n", "RspQ CIdx:", intrq->cidx);
2238*4882a593Smuzhiyun 		seq_printf(seq, "%-16s %8u\n", "RspQ Gen:", intrq->gen);
2239*4882a593Smuzhiyun 	}
2240*4882a593Smuzhiyun 
2241*4882a593Smuzhiyun 	#undef R
2242*4882a593Smuzhiyun 	#undef T
2243*4882a593Smuzhiyun 	#undef S
2244*4882a593Smuzhiyun 	#undef R3
2245*4882a593Smuzhiyun 	#undef T3
2246*4882a593Smuzhiyun 	#undef S3
2247*4882a593Smuzhiyun 
2248*4882a593Smuzhiyun 	return 0;
2249*4882a593Smuzhiyun }
2250*4882a593Smuzhiyun 
2251*4882a593Smuzhiyun /*
2252*4882a593Smuzhiyun  * Return the number of "entries" in our "file".  We group the multi-Queue
2253*4882a593Smuzhiyun  * sections with QPL Queue Sets per "entry".  The sections of the output are:
2254*4882a593Smuzhiyun  *
2255*4882a593Smuzhiyun  *     Ethernet RX/TX Queue Sets
2256*4882a593Smuzhiyun  *     Firmware Event Queue
2257*4882a593Smuzhiyun  *     Forwarded Interrupt Queue (if in MSI mode)
2258*4882a593Smuzhiyun  */
2259*4882a593Smuzhiyun static int sge_qstats_entries(const struct adapter *adapter)
2260*4882a593Smuzhiyun {
2261*4882a593Smuzhiyun 	return DIV_ROUND_UP(adapter->sge.ethqsets, QPL) + 1 +
2262*4882a593Smuzhiyun 		((adapter->flags & CXGB4VF_USING_MSI) != 0);
2263*4882a593Smuzhiyun }
2264*4882a593Smuzhiyun 
2265*4882a593Smuzhiyun static void *sge_qstats_start(struct seq_file *seq, loff_t *pos)
2266*4882a593Smuzhiyun {
2267*4882a593Smuzhiyun 	int entries = sge_qstats_entries(seq->private);
2268*4882a593Smuzhiyun 
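	/* Bias the position by +1 so a valid position 0 isn't returned as a
	 * NULL (end-of-sequence) cookie; sge_qstats_show() subtracts 1 to
	 * recover the row index.
	 */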
2269*4882a593Smuzhiyun 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2270*4882a593Smuzhiyun }
2271*4882a593Smuzhiyun 
2272*4882a593Smuzhiyun static void sge_qstats_stop(struct seq_file *seq, void *v)
2273*4882a593Smuzhiyun {
2274*4882a593Smuzhiyun }
2275*4882a593Smuzhiyun 
2276*4882a593Smuzhiyun static void *sge_qstats_next(struct seq_file *seq, void *v, loff_t *pos)
2277*4882a593Smuzhiyun {
2278*4882a593Smuzhiyun 	int entries = sge_qstats_entries(seq->private);
2279*4882a593Smuzhiyun 
2280*4882a593Smuzhiyun 	(*pos)++;
2281*4882a593Smuzhiyun 	return *pos < entries ? (void *)((uintptr_t)*pos + 1) : NULL;
2282*4882a593Smuzhiyun }
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun static const struct seq_operations sge_qstats_sops = {
2285*4882a593Smuzhiyun 	.start = sge_qstats_start,
2286*4882a593Smuzhiyun 	.next  = sge_qstats_next,
2287*4882a593Smuzhiyun 	.stop  = sge_qstats_stop,
2288*4882a593Smuzhiyun 	.show  = sge_qstats_show
2289*4882a593Smuzhiyun };
2290*4882a593Smuzhiyun 
2291*4882a593Smuzhiyun DEFINE_SEQ_ATTRIBUTE(sge_qstats);
2292*4882a593Smuzhiyun 
2293*4882a593Smuzhiyun /*
2294*4882a593Smuzhiyun  * Show PCI-E SR-IOV Virtual Function Resource Limits.
2295*4882a593Smuzhiyun  */
2296*4882a593Smuzhiyun static int resources_show(struct seq_file *seq, void *v)
2297*4882a593Smuzhiyun {
2298*4882a593Smuzhiyun 	struct adapter *adapter = seq->private;
2299*4882a593Smuzhiyun 	struct vf_resources *vfres = &adapter->params.vfres;
2300*4882a593Smuzhiyun 
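	/* Print one resource limit per line: a left-justified description
	 * (annotated with the vf_resources field name) followed by its value.
	 */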
2301*4882a593Smuzhiyun 	#define S(desc, fmt, var) \
2302*4882a593Smuzhiyun 		seq_printf(seq, "%-60s " fmt "\n", \
2303*4882a593Smuzhiyun 			   desc " (" #var "):", vfres->var)
2304*4882a593Smuzhiyun 
2305*4882a593Smuzhiyun 	S("Virtual Interfaces", "%d", nvi);
2306*4882a593Smuzhiyun 	S("Egress Queues", "%d", neq);
2307*4882a593Smuzhiyun 	S("Ethernet Control", "%d", nethctrl);
2308*4882a593Smuzhiyun 	S("Ingress Queues/w Free Lists/Interrupts", "%d", niqflint);
2309*4882a593Smuzhiyun 	S("Ingress Queues", "%d", niq);
2310*4882a593Smuzhiyun 	S("Traffic Class", "%d", tc);
2311*4882a593Smuzhiyun 	S("Port Access Rights Mask", "%#x", pmask);
2312*4882a593Smuzhiyun 	S("MAC Address Filters", "%d", nexactf);
2313*4882a593Smuzhiyun 	S("Firmware Command Read Capabilities", "%#x", r_caps);
2314*4882a593Smuzhiyun 	S("Firmware Command Write/Execute Capabilities", "%#x", wx_caps);
2315*4882a593Smuzhiyun 
2316*4882a593Smuzhiyun 	#undef S
2317*4882a593Smuzhiyun 
2318*4882a593Smuzhiyun 	return 0;
2319*4882a593Smuzhiyun }
2320*4882a593Smuzhiyun DEFINE_SHOW_ATTRIBUTE(resources);
2321*4882a593Smuzhiyun 
2322*4882a593Smuzhiyun /*
2323*4882a593Smuzhiyun  * Show Virtual Interfaces.
2324*4882a593Smuzhiyun  */
2325*4882a593Smuzhiyun static int interfaces_show(struct seq_file *seq, void *v)
2326*4882a593Smuzhiyun {
2327*4882a593Smuzhiyun 	if (v == SEQ_START_TOKEN) {
2328*4882a593Smuzhiyun 		seq_puts(seq, "Interface  Port   VIID\n");
2329*4882a593Smuzhiyun 	} else {
2330*4882a593Smuzhiyun 		struct adapter *adapter = seq->private;
2331*4882a593Smuzhiyun 		int pidx = (uintptr_t)v - 2;
2332*4882a593Smuzhiyun 		struct net_device *dev = adapter->port[pidx];
2333*4882a593Smuzhiyun 		struct port_info *pi = netdev_priv(dev);
2334*4882a593Smuzhiyun 
2335*4882a593Smuzhiyun 		seq_printf(seq, "%9s  %4d  %#5x\n",
2336*4882a593Smuzhiyun 			   dev->name, pi->port_id, pi->viid);
2337*4882a593Smuzhiyun 	}
2338*4882a593Smuzhiyun 	return 0;
2339*4882a593Smuzhiyun }
2340*4882a593Smuzhiyun 
2341*4882a593Smuzhiyun static inline void *interfaces_get_idx(struct adapter *adapter, loff_t pos)
2342*4882a593Smuzhiyun {
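	/* Position 0 is the SEQ_START_TOKEN header line, so port pidx is
	 * reported at position pidx + 1; interfaces_show() undoes this by
	 * subtracting 2 from the cookie (which is itself biased by +1 to
	 * stay non-NULL).
	 */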
2343*4882a593Smuzhiyun 	return pos <= adapter->params.nports
2344*4882a593Smuzhiyun 		? (void *)(uintptr_t)(pos + 1)
2345*4882a593Smuzhiyun 		: NULL;
2346*4882a593Smuzhiyun }
2347*4882a593Smuzhiyun 
2348*4882a593Smuzhiyun static void *interfaces_start(struct seq_file *seq, loff_t *pos)
2349*4882a593Smuzhiyun {
2350*4882a593Smuzhiyun 	return *pos
2351*4882a593Smuzhiyun 		? interfaces_get_idx(seq->private, *pos)
2352*4882a593Smuzhiyun 		: SEQ_START_TOKEN;
2353*4882a593Smuzhiyun }
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun static void *interfaces_next(struct seq_file *seq, void *v, loff_t *pos)
2356*4882a593Smuzhiyun {
2357*4882a593Smuzhiyun 	(*pos)++;
2358*4882a593Smuzhiyun 	return interfaces_get_idx(seq->private, *pos);
2359*4882a593Smuzhiyun }
2360*4882a593Smuzhiyun 
2361*4882a593Smuzhiyun static void interfaces_stop(struct seq_file *seq, void *v)
2362*4882a593Smuzhiyun {
2363*4882a593Smuzhiyun }
2364*4882a593Smuzhiyun 
2365*4882a593Smuzhiyun static const struct seq_operations interfaces_sops = {
2366*4882a593Smuzhiyun 	.start = interfaces_start,
2367*4882a593Smuzhiyun 	.next  = interfaces_next,
2368*4882a593Smuzhiyun 	.stop  = interfaces_stop,
2369*4882a593Smuzhiyun 	.show  = interfaces_show
2370*4882a593Smuzhiyun };
2371*4882a593Smuzhiyun 
2372*4882a593Smuzhiyun DEFINE_SEQ_ATTRIBUTE(interfaces);
2373*4882a593Smuzhiyun 
2374*4882a593Smuzhiyun /*
2375*4882a593Smuzhiyun  * /sys/kernel/debug/cxgb4vf/ files list.
2376*4882a593Smuzhiyun  */
2377*4882a593Smuzhiyun struct cxgb4vf_debugfs_entry {
2378*4882a593Smuzhiyun 	const char *name;		/* name of debugfs node */
2379*4882a593Smuzhiyun 	umode_t mode;			/* file system mode */
2380*4882a593Smuzhiyun 	const struct file_operations *fops;
2381*4882a593Smuzhiyun };
2382*4882a593Smuzhiyun 
2383*4882a593Smuzhiyun static struct cxgb4vf_debugfs_entry debugfs_files[] = {
2384*4882a593Smuzhiyun 	{ "mboxlog",    0444, &mboxlog_fops },
2385*4882a593Smuzhiyun 	{ "sge_qinfo",  0444, &sge_qinfo_fops },
2386*4882a593Smuzhiyun 	{ "sge_qstats", 0444, &sge_qstats_fops },
2387*4882a593Smuzhiyun 	{ "resources",  0444, &resources_fops },
2388*4882a593Smuzhiyun 	{ "interfaces", 0444, &interfaces_fops },
2389*4882a593Smuzhiyun };
2390*4882a593Smuzhiyun 
2391*4882a593Smuzhiyun /*
2392*4882a593Smuzhiyun  * Module and device initialization and cleanup code.
2393*4882a593Smuzhiyun  * ==================================================
2394*4882a593Smuzhiyun  */
2395*4882a593Smuzhiyun 
2396*4882a593Smuzhiyun /*
2397*4882a593Smuzhiyun  * Set up our /sys/kernel/debug/cxgb4vf sub-nodes.  We assume that the
2398*4882a593Smuzhiyun  * directory (debugfs_root) has already been set up.
2399*4882a593Smuzhiyun  */
2400*4882a593Smuzhiyun static int setup_debugfs(struct adapter *adapter)
2401*4882a593Smuzhiyun {
2402*4882a593Smuzhiyun 	int i;
2403*4882a593Smuzhiyun 
2404*4882a593Smuzhiyun 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2405*4882a593Smuzhiyun 
2406*4882a593Smuzhiyun 	/*
2407*4882a593Smuzhiyun 	 * Debugfs support is best effort.
2408*4882a593Smuzhiyun 	 */
2409*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
2410*4882a593Smuzhiyun 		debugfs_create_file(debugfs_files[i].name,
2411*4882a593Smuzhiyun 				    debugfs_files[i].mode,
2412*4882a593Smuzhiyun 				    adapter->debugfs_root, adapter,
2413*4882a593Smuzhiyun 				    debugfs_files[i].fops);
2414*4882a593Smuzhiyun 
2415*4882a593Smuzhiyun 	return 0;
2416*4882a593Smuzhiyun }
2417*4882a593Smuzhiyun 
2418*4882a593Smuzhiyun /*
2419*4882a593Smuzhiyun  * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above.  We leave
2420*4882a593Smuzhiyun  * it to our caller to tear down the directory (debugfs_root).
2421*4882a593Smuzhiyun  */
2422*4882a593Smuzhiyun static void cleanup_debugfs(struct adapter *adapter)
2423*4882a593Smuzhiyun {
2424*4882a593Smuzhiyun 	BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root));
2425*4882a593Smuzhiyun 
2426*4882a593Smuzhiyun 	/*
2427*4882a593Smuzhiyun 	 * Unlike our sister routine cleanup_proc(), we don't need to remove
2428*4882a593Smuzhiyun 	 * individual entries because a call will be made to
2429*4882a593Smuzhiyun 	 * debugfs_remove_recursive().  We just need to clean up any ancillary
2430*4882a593Smuzhiyun 	 * persistent state.
2431*4882a593Smuzhiyun 	 */
2432*4882a593Smuzhiyun 	/* nothing to do */
2433*4882a593Smuzhiyun }
2434*4882a593Smuzhiyun 
2435*4882a593Smuzhiyun /* Figure out how many Ports and Queue Sets we can support.  This depends on
2436*4882a593Smuzhiyun  * knowing our Virtual Function Resources and may be called a second time if
2437*4882a593Smuzhiyun  * we fall back from MSI-X to MSI Interrupt Mode.
2438*4882a593Smuzhiyun  */
2439*4882a593Smuzhiyun static void size_nports_qsets(struct adapter *adapter)
2440*4882a593Smuzhiyun {
2441*4882a593Smuzhiyun 	struct vf_resources *vfres = &adapter->params.vfres;
2442*4882a593Smuzhiyun 	unsigned int ethqsets, pmask_nports;
2443*4882a593Smuzhiyun 
2444*4882a593Smuzhiyun 	/* The number of "ports" which we support is equal to the number of
2445*4882a593Smuzhiyun 	 * Virtual Interfaces with which we've been provisioned.
2446*4882a593Smuzhiyun 	 */
2447*4882a593Smuzhiyun 	adapter->params.nports = vfres->nvi;
2448*4882a593Smuzhiyun 	if (adapter->params.nports > MAX_NPORTS) {
2449*4882a593Smuzhiyun 		dev_warn(adapter->pdev_dev, "only using %d of %d maximum"
2450*4882a593Smuzhiyun 			 " allowed virtual interfaces\n", MAX_NPORTS,
2451*4882a593Smuzhiyun 			 adapter->params.nports);
2452*4882a593Smuzhiyun 		adapter->params.nports = MAX_NPORTS;
2453*4882a593Smuzhiyun 	}
2454*4882a593Smuzhiyun 
2455*4882a593Smuzhiyun 	/* We may have been provisioned with more VIs than the number of
2456*4882a593Smuzhiyun 	 * ports we're allowed to access (our Port Access Rights Mask).
2457*4882a593Smuzhiyun 	 * This is obviously a configuration conflict but we don't want to
2458*4882a593Smuzhiyun 	 * crash the kernel or anything silly just because of that.
2459*4882a593Smuzhiyun 	 */
2460*4882a593Smuzhiyun 	pmask_nports = hweight32(adapter->params.vfres.pmask);
2461*4882a593Smuzhiyun 	if (pmask_nports < adapter->params.nports) {
2462*4882a593Smuzhiyun 		dev_warn(adapter->pdev_dev, "only using %d of %d provisioned"
2463*4882a593Smuzhiyun 			 " virtual interfaces; limited by Port Access Rights"
2464*4882a593Smuzhiyun 			 " mask %#x\n", pmask_nports, adapter->params.nports,
2465*4882a593Smuzhiyun 			 adapter->params.vfres.pmask);
2466*4882a593Smuzhiyun 		adapter->params.nports = pmask_nports;
2467*4882a593Smuzhiyun 	}
2468*4882a593Smuzhiyun 
2469*4882a593Smuzhiyun 	/* We need to reserve an Ingress Queue for the Asynchronous Firmware
2470*4882a593Smuzhiyun 	 * Event Queue.  And if we're using MSI Interrupts, we'll also need to
2471*4882a593Smuzhiyun 	 * reserve an Ingress Queue for Forwarded Interrupts.
2472*4882a593Smuzhiyun 	 *
2473*4882a593Smuzhiyun 	 * The rest of the FL/Intr-capable ingress queues will be matched up
2474*4882a593Smuzhiyun 	 * one-for-one with Ethernet/Control egress queues in order to form
2475*4882a593Smuzhiyun 	 * "Queue Sets" which will be apportioned between the "ports".  For
2476*4882a593Smuzhiyun 	 * each Queue Set, we'll need the ability to allocate two Egress
2477*4882a593Smuzhiyun 	 * Contexts -- one for the Ingress Queue Free List and one for the TX
2478*4882a593Smuzhiyun 	 * Ethernet Queue.
2479*4882a593Smuzhiyun 	 *
2480*4882a593Smuzhiyun 	 * Note that even if we're currently configured to use MSI-X
2481*4882a593Smuzhiyun 	 * Interrupts (module variable msi == MSI_MSIX) we may get downgraded
2482*4882a593Smuzhiyun 	 * to MSI Interrupts if we can't get enough MSI-X Interrupts.  If that
2483*4882a593Smuzhiyun 	 * happens we'll need to adjust things later.
2484*4882a593Smuzhiyun 	 */
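	/* Illustrative (hypothetical) numbers: with niqflint = 10 in MSI mode
	 * we start from 10 - 1 (firmware event queue) - 1 (forwarded
	 * interrupt queue) = 8 candidate Queue Sets, which is then further
	 * capped by nethctrl, neq/2 and MAX_ETH_QSETS below.
	 */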
2485*4882a593Smuzhiyun 	ethqsets = vfres->niqflint - 1 - (msi == MSI_MSI);
2486*4882a593Smuzhiyun 	if (vfres->nethctrl != ethqsets)
2487*4882a593Smuzhiyun 		ethqsets = min(vfres->nethctrl, ethqsets);
2488*4882a593Smuzhiyun 	if (vfres->neq < ethqsets*2)
2489*4882a593Smuzhiyun 		ethqsets = vfres->neq/2;
2490*4882a593Smuzhiyun 	if (ethqsets > MAX_ETH_QSETS)
2491*4882a593Smuzhiyun 		ethqsets = MAX_ETH_QSETS;
2492*4882a593Smuzhiyun 	adapter->sge.max_ethqsets = ethqsets;
2493*4882a593Smuzhiyun 
2494*4882a593Smuzhiyun 	if (adapter->sge.max_ethqsets < adapter->params.nports) {
2495*4882a593Smuzhiyun 		dev_warn(adapter->pdev_dev, "only using %d of %d available"
2496*4882a593Smuzhiyun 			 " virtual interfaces (too few Queue Sets)\n",
2497*4882a593Smuzhiyun 			 adapter->sge.max_ethqsets, adapter->params.nports);
2498*4882a593Smuzhiyun 		adapter->params.nports = adapter->sge.max_ethqsets;
2499*4882a593Smuzhiyun 	}
2500*4882a593Smuzhiyun }
2501*4882a593Smuzhiyun 
2502*4882a593Smuzhiyun /*
2503*4882a593Smuzhiyun  * Perform early "adapter" initialization.  This is where we discover what
2504*4882a593Smuzhiyun  * adapter parameters we're going to be using and initialize basic adapter
2505*4882a593Smuzhiyun  * hardware support.
2506*4882a593Smuzhiyun  */
2507*4882a593Smuzhiyun static int adap_init0(struct adapter *adapter)
2508*4882a593Smuzhiyun {
2509*4882a593Smuzhiyun 	struct sge_params *sge_params = &adapter->params.sge;
2510*4882a593Smuzhiyun 	struct sge *s = &adapter->sge;
2511*4882a593Smuzhiyun 	int err;
2512*4882a593Smuzhiyun 	u32 param, val = 0;
2513*4882a593Smuzhiyun 
2514*4882a593Smuzhiyun 	/*
2515*4882a593Smuzhiyun 	 * Some environments do not properly handle PCIE FLRs -- e.g. in Linux
2516*4882a593Smuzhiyun 	 * 2.6.31 and later we can't call pci_reset_function() in order to
2517*4882a593Smuzhiyun 	 * issue an FLR because of a self-deadlock on the device semaphore.
2518*4882a593Smuzhiyun 	 * Meanwhile, the OS infrastructure doesn't issue FLRs in all the
2519*4882a593Smuzhiyun 	 * cases where they're needed -- for instance, some versions of KVM
2520*4882a593Smuzhiyun 	 * fail to reset "Assigned Devices" when the VM reboots.  Therefore we
2521*4882a593Smuzhiyun 	 * use the firmware based reset in order to reset any per function
2522*4882a593Smuzhiyun 	 * state.
2523*4882a593Smuzhiyun 	 */
2524*4882a593Smuzhiyun 	err = t4vf_fw_reset(adapter);
2525*4882a593Smuzhiyun 	if (err < 0) {
2526*4882a593Smuzhiyun 		dev_err(adapter->pdev_dev, "FW reset failed: err=%d\n", err);
2527*4882a593Smuzhiyun 		return err;
2528*4882a593Smuzhiyun 	}
2529*4882a593Smuzhiyun 
2530*4882a593Smuzhiyun 	/*
2531*4882a593Smuzhiyun 	 * Grab basic operational parameters.  These will predominantly have
2532*4882a593Smuzhiyun 	 * been set up by the Physical Function Driver or will be hard coded
2533*4882a593Smuzhiyun 	 * into the adapter.  We just have to live with them ...  Note that
2534*4882a593Smuzhiyun 	 * we _must_ get our VPD parameters before our SGE parameters because
2535*4882a593Smuzhiyun 	 * we need to know the adapter's core clock from the VPD in order to
2536*4882a593Smuzhiyun 	 * properly decode the SGE Timer Values.
2537*4882a593Smuzhiyun 	 */
2538*4882a593Smuzhiyun 	err = t4vf_get_dev_params(adapter);
2539*4882a593Smuzhiyun 	if (err) {
2540*4882a593Smuzhiyun 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2541*4882a593Smuzhiyun 			" device parameters: err=%d\n", err);
2542*4882a593Smuzhiyun 		return err;
2543*4882a593Smuzhiyun 	}
2544*4882a593Smuzhiyun 	err = t4vf_get_vpd_params(adapter);
2545*4882a593Smuzhiyun 	if (err) {
2546*4882a593Smuzhiyun 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2547*4882a593Smuzhiyun 			" VPD parameters: err=%d\n", err);
2548*4882a593Smuzhiyun 		return err;
2549*4882a593Smuzhiyun 	}
2550*4882a593Smuzhiyun 	err = t4vf_get_sge_params(adapter);
2551*4882a593Smuzhiyun 	if (err) {
2552*4882a593Smuzhiyun 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2553*4882a593Smuzhiyun 			" SGE parameters: err=%d\n", err);
2554*4882a593Smuzhiyun 		return err;
2555*4882a593Smuzhiyun 	}
2556*4882a593Smuzhiyun 	err = t4vf_get_rss_glb_config(adapter);
2557*4882a593Smuzhiyun 	if (err) {
2558*4882a593Smuzhiyun 		dev_err(adapter->pdev_dev, "unable to retrieve adapter"
2559*4882a593Smuzhiyun 			" RSS parameters: err=%d\n", err);
2560*4882a593Smuzhiyun 		return err;
2561*4882a593Smuzhiyun 	}
2562*4882a593Smuzhiyun 	if (adapter->params.rss.mode !=
2563*4882a593Smuzhiyun 	    FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
2564*4882a593Smuzhiyun 		dev_err(adapter->pdev_dev, "unable to operate with global RSS"
2565*4882a593Smuzhiyun 			" mode %d\n", adapter->params.rss.mode);
2566*4882a593Smuzhiyun 		return -EINVAL;
2567*4882a593Smuzhiyun 	}
2568*4882a593Smuzhiyun 	err = t4vf_sge_init(adapter);
2569*4882a593Smuzhiyun 	if (err) {
2570*4882a593Smuzhiyun 		dev_err(adapter->pdev_dev, "unable to use adapter parameters:"
2571*4882a593Smuzhiyun 			" err=%d\n", err);
2572*4882a593Smuzhiyun 		return err;
2573*4882a593Smuzhiyun 	}
2574*4882a593Smuzhiyun 
2575*4882a593Smuzhiyun 	/* If we're running on newer firmware, let it know that we're
2576*4882a593Smuzhiyun 	 * prepared to deal with encapsulated CPL messages.  Older
2577*4882a593Smuzhiyun 	 * firmware won't understand this and we'll just get
2578*4882a593Smuzhiyun 	 * unencapsulated messages ...
2579*4882a593Smuzhiyun 	 */
2580*4882a593Smuzhiyun 	param = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
2581*4882a593Smuzhiyun 		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_CPLFW4MSG_ENCAP);
2582*4882a593Smuzhiyun 	val = 1;
2583*4882a593Smuzhiyun 	(void) t4vf_set_params(adapter, 1, &param, &val);
2584*4882a593Smuzhiyun 
2585*4882a593Smuzhiyun 	/*
2586*4882a593Smuzhiyun 	 * Retrieve our RX interrupt holdoff timer values and counter
2587*4882a593Smuzhiyun 	 * threshold values from the SGE parameters.
2588*4882a593Smuzhiyun 	 */
2589*4882a593Smuzhiyun 	s->timer_val[0] = core_ticks_to_us(adapter,
2590*4882a593Smuzhiyun 		TIMERVALUE0_G(sge_params->sge_timer_value_0_and_1));
2591*4882a593Smuzhiyun 	s->timer_val[1] = core_ticks_to_us(adapter,
2592*4882a593Smuzhiyun 		TIMERVALUE1_G(sge_params->sge_timer_value_0_and_1));
2593*4882a593Smuzhiyun 	s->timer_val[2] = core_ticks_to_us(adapter,
2594*4882a593Smuzhiyun 		TIMERVALUE0_G(sge_params->sge_timer_value_2_and_3));
2595*4882a593Smuzhiyun 	s->timer_val[3] = core_ticks_to_us(adapter,
2596*4882a593Smuzhiyun 		TIMERVALUE1_G(sge_params->sge_timer_value_2_and_3));
2597*4882a593Smuzhiyun 	s->timer_val[4] = core_ticks_to_us(adapter,
2598*4882a593Smuzhiyun 		TIMERVALUE0_G(sge_params->sge_timer_value_4_and_5));
2599*4882a593Smuzhiyun 	s->timer_val[5] = core_ticks_to_us(adapter,
2600*4882a593Smuzhiyun 		TIMERVALUE1_G(sge_params->sge_timer_value_4_and_5));
2601*4882a593Smuzhiyun 
2602*4882a593Smuzhiyun 	s->counter_val[0] = THRESHOLD_0_G(sge_params->sge_ingress_rx_threshold);
2603*4882a593Smuzhiyun 	s->counter_val[1] = THRESHOLD_1_G(sge_params->sge_ingress_rx_threshold);
2604*4882a593Smuzhiyun 	s->counter_val[2] = THRESHOLD_2_G(sge_params->sge_ingress_rx_threshold);
2605*4882a593Smuzhiyun 	s->counter_val[3] = THRESHOLD_3_G(sge_params->sge_ingress_rx_threshold);
2606*4882a593Smuzhiyun 
2607*4882a593Smuzhiyun 	/*
2608*4882a593Smuzhiyun 	 * Grab our Virtual Interface resource allocation, extract the
2609*4882a593Smuzhiyun 	 * features that we're interested in and do a bit of sanity testing on
2610*4882a593Smuzhiyun 	 * what we discover.
2611*4882a593Smuzhiyun 	 */
2612*4882a593Smuzhiyun 	err = t4vf_get_vfres(adapter);
2613*4882a593Smuzhiyun 	if (err) {
2614*4882a593Smuzhiyun 		dev_err(adapter->pdev_dev, "unable to get virtual interface"
2615*4882a593Smuzhiyun 			" resources: err=%d\n", err);
2616*4882a593Smuzhiyun 		return err;
2617*4882a593Smuzhiyun 	}
2618*4882a593Smuzhiyun 
2619*4882a593Smuzhiyun 	/* Check for various parameter sanity issues */
2620*4882a593Smuzhiyun 	if (adapter->params.vfres.pmask == 0) {
2621*4882a593Smuzhiyun 		dev_err(adapter->pdev_dev, "no port access configured/"
2622*4882a593Smuzhiyun 			"usable!\n");
2623*4882a593Smuzhiyun 		return -EINVAL;
2624*4882a593Smuzhiyun 	}
2625*4882a593Smuzhiyun 	if (adapter->params.vfres.nvi == 0) {
2626*4882a593Smuzhiyun 		dev_err(adapter->pdev_dev, "no virtual interfaces configured/"
2627*4882a593Smuzhiyun 			"usable!\n");
2628*4882a593Smuzhiyun 		return -EINVAL;
2629*4882a593Smuzhiyun 	}
2630*4882a593Smuzhiyun 
2631*4882a593Smuzhiyun 	/* Initialize nports and max_ethqsets now that we have our Virtual
2632*4882a593Smuzhiyun 	 * Function Resources.
2633*4882a593Smuzhiyun 	 */
2634*4882a593Smuzhiyun 	size_nports_qsets(adapter);
2635*4882a593Smuzhiyun 
2636*4882a593Smuzhiyun 	adapter->flags |= CXGB4VF_FW_OK;
2637*4882a593Smuzhiyun 	return 0;
2638*4882a593Smuzhiyun }
2639*4882a593Smuzhiyun 
2640*4882a593Smuzhiyun static inline void init_rspq(struct sge_rspq *rspq, u8 timer_idx,
2641*4882a593Smuzhiyun 			     u8 pkt_cnt_idx, unsigned int size,
2642*4882a593Smuzhiyun 			     unsigned int iqe_size)
2643*4882a593Smuzhiyun {
2644*4882a593Smuzhiyun 	rspq->intr_params = (QINTR_TIMER_IDX_V(timer_idx) |
2645*4882a593Smuzhiyun 			     (pkt_cnt_idx < SGE_NCOUNTERS ?
2646*4882a593Smuzhiyun 			      QINTR_CNT_EN_F : 0));
2647*4882a593Smuzhiyun 	rspq->pktcnt_idx = (pkt_cnt_idx < SGE_NCOUNTERS
2648*4882a593Smuzhiyun 			    ? pkt_cnt_idx
2649*4882a593Smuzhiyun 			    : 0);
2650*4882a593Smuzhiyun 	rspq->iqe_len = iqe_size;
2651*4882a593Smuzhiyun 	rspq->size = size;
2652*4882a593Smuzhiyun }
2653*4882a593Smuzhiyun 
2654*4882a593Smuzhiyun /*
2655*4882a593Smuzhiyun  * Perform default configuration of DMA queues depending on the number and
2656*4882a593Smuzhiyun  * type of ports we found and the number of available CPUs.  Most settings can
2657*4882a593Smuzhiyun  * be modified by the admin via ethtool and cxgbtool prior to the adapter
2658*4882a593Smuzhiyun  * being brought up for the first time.
2659*4882a593Smuzhiyun  */
2660*4882a593Smuzhiyun static void cfg_queues(struct adapter *adapter)
2661*4882a593Smuzhiyun {
2662*4882a593Smuzhiyun 	struct sge *s = &adapter->sge;
2663*4882a593Smuzhiyun 	int q10g, n10g, qidx, pidx, qs;
2664*4882a593Smuzhiyun 	size_t iqe_size;
2665*4882a593Smuzhiyun 
2666*4882a593Smuzhiyun 	/*
2667*4882a593Smuzhiyun 	 * We should not be called till we know how many Queue Sets we can
2668*4882a593Smuzhiyun 	 * support.  In particular, this means that we need to know what kind
2669*4882a593Smuzhiyun 	 * of interrupts we'll be using ...
2670*4882a593Smuzhiyun 	 */
2671*4882a593Smuzhiyun 	BUG_ON((adapter->flags &
2672*4882a593Smuzhiyun 	       (CXGB4VF_USING_MSIX | CXGB4VF_USING_MSI)) == 0);
2673*4882a593Smuzhiyun 
2674*4882a593Smuzhiyun 	/*
2675*4882a593Smuzhiyun 	 * Count the number of 10GbE Virtual Interfaces that we have.
2676*4882a593Smuzhiyun 	 */
2677*4882a593Smuzhiyun 	n10g = 0;
2678*4882a593Smuzhiyun 	for_each_port(adapter, pidx)
2679*4882a593Smuzhiyun 		n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
2680*4882a593Smuzhiyun 
2681*4882a593Smuzhiyun 	/*
2682*4882a593Smuzhiyun 	 * We default to 1 queue per non-10G port and up to # of cores queues
2683*4882a593Smuzhiyun 	 * per 10G port.
2684*4882a593Smuzhiyun 	 */
2685*4882a593Smuzhiyun 	if (n10g == 0)
2686*4882a593Smuzhiyun 		q10g = 0;
2687*4882a593Smuzhiyun 	else {
2688*4882a593Smuzhiyun 		int n1g = (adapter->params.nports - n10g);
2689*4882a593Smuzhiyun 		q10g = (adapter->sge.max_ethqsets - n1g) / n10g;
2690*4882a593Smuzhiyun 		if (q10g > num_online_cpus())
2691*4882a593Smuzhiyun 			q10g = num_online_cpus();
2692*4882a593Smuzhiyun 	}
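	/* Worked example (hypothetical numbers): with max_ethqsets = 16 and
	 * two ports of which one is 10G, the 1G port keeps a single Queue Set
	 * and the 10G port gets min((16 - 1) / 1, num_online_cpus()) of them.
	 */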
2693*4882a593Smuzhiyun 
2694*4882a593Smuzhiyun 	/*
2695*4882a593Smuzhiyun 	 * Allocate the "Queue Sets" to the various Virtual Interfaces.
2696*4882a593Smuzhiyun 	 * The layout will be established in setup_sge_queues() when the
2697*4882a593Smuzhiyun 	 * adapter is brought up for the first time.
2698*4882a593Smuzhiyun 	 */
2699*4882a593Smuzhiyun 	qidx = 0;
2700*4882a593Smuzhiyun 	for_each_port(adapter, pidx) {
2701*4882a593Smuzhiyun 		struct port_info *pi = adap2pinfo(adapter, pidx);
2702*4882a593Smuzhiyun 
2703*4882a593Smuzhiyun 		pi->first_qset = qidx;
2704*4882a593Smuzhiyun 		pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
2705*4882a593Smuzhiyun 		qidx += pi->nqsets;
2706*4882a593Smuzhiyun 	}
2707*4882a593Smuzhiyun 	s->ethqsets = qidx;
2708*4882a593Smuzhiyun 
2709*4882a593Smuzhiyun 	/*
2710*4882a593Smuzhiyun 	 * The Ingress Queue Entry Size for our various Response Queues needs
2711*4882a593Smuzhiyun 	 * to be big enough to accommodate the largest message we can receive
2712*4882a593Smuzhiyun 	 * from the chip/firmware; which is 64 bytes ...
2713*4882a593Smuzhiyun 	 */
2714*4882a593Smuzhiyun 	iqe_size = 64;
2715*4882a593Smuzhiyun 
2716*4882a593Smuzhiyun 	/*
2717*4882a593Smuzhiyun 	 * Set up default Queue Set parameters ...  Start off with the
2718*4882a593Smuzhiyun 	 * shortest interrupt holdoff timer.
2719*4882a593Smuzhiyun 	 */
2720*4882a593Smuzhiyun 	for (qs = 0; qs < s->max_ethqsets; qs++) {
2721*4882a593Smuzhiyun 		struct sge_eth_rxq *rxq = &s->ethrxq[qs];
2722*4882a593Smuzhiyun 		struct sge_eth_txq *txq = &s->ethtxq[qs];
2723*4882a593Smuzhiyun 
2724*4882a593Smuzhiyun 		init_rspq(&rxq->rspq, 0, 0, 1024, iqe_size);
2725*4882a593Smuzhiyun 		rxq->fl.size = 72;
2726*4882a593Smuzhiyun 		txq->q.size = 1024;
2727*4882a593Smuzhiyun 	}
2728*4882a593Smuzhiyun 
2729*4882a593Smuzhiyun 	/*
2730*4882a593Smuzhiyun 	 * The firmware event queue is used for link state changes and
2731*4882a593Smuzhiyun 	 * notifications of TX DMA completions.
2732*4882a593Smuzhiyun 	 */
2733*4882a593Smuzhiyun 	init_rspq(&s->fw_evtq, SGE_TIMER_RSTRT_CNTR, 0, 512, iqe_size);
2734*4882a593Smuzhiyun 
2735*4882a593Smuzhiyun 	/*
2736*4882a593Smuzhiyun 	 * The forwarded interrupt queue is used when we're in MSI interrupt
2737*4882a593Smuzhiyun 	 * mode.  In this mode all interrupts associated with RX queues will
2738*4882a593Smuzhiyun 	 * be forwarded to a single queue which we'll associate with our MSI
2739*4882a593Smuzhiyun 	 * interrupt vector.  The messages dropped in the forwarded interrupt
2740*4882a593Smuzhiyun 	 * queue will indicate which ingress queue needs servicing ...  This
2741*4882a593Smuzhiyun 	 * queue needs to be large enough to accommodate all of the ingress
2742*4882a593Smuzhiyun 	 * queues which are forwarding their interrupt (+1 to prevent the PIDX
2743*4882a593Smuzhiyun 	 * from equalling the CIDX if every ingress queue has an outstanding
2744*4882a593Smuzhiyun 	 * interrupt).  The queue doesn't need to be any larger because no
2745*4882a593Smuzhiyun 	 * ingress queue will ever have more than one outstanding interrupt at
2746*4882a593Smuzhiyun 	 * any time ...
2747*4882a593Smuzhiyun 	 */
2748*4882a593Smuzhiyun 	init_rspq(&s->intrq, SGE_TIMER_RSTRT_CNTR, 0, MSIX_ENTRIES + 1,
2749*4882a593Smuzhiyun 		  iqe_size);
2750*4882a593Smuzhiyun }
2751*4882a593Smuzhiyun 
2752*4882a593Smuzhiyun /*
2753*4882a593Smuzhiyun  * Reduce the number of Ethernet queues across all ports to at most n.
2754*4882a593Smuzhiyun  * n provides at least one queue per port.
2755*4882a593Smuzhiyun  */
2756*4882a593Smuzhiyun static void reduce_ethqs(struct adapter *adapter, int n)
2757*4882a593Smuzhiyun {
2758*4882a593Smuzhiyun 	int i;
2759*4882a593Smuzhiyun 	struct port_info *pi;
2760*4882a593Smuzhiyun 
2761*4882a593Smuzhiyun 	/*
2762*4882a593Smuzhiyun 	 * While we have too many active Ether Queue Sets, iterate across the
2763*4882a593Smuzhiyun 	 * "ports" and reduce their individual Queue Set allocations.
2764*4882a593Smuzhiyun 	 */
2765*4882a593Smuzhiyun 	BUG_ON(n < adapter->params.nports);
2766*4882a593Smuzhiyun 	while (n < adapter->sge.ethqsets)
2767*4882a593Smuzhiyun 		for_each_port(adapter, i) {
2768*4882a593Smuzhiyun 			pi = adap2pinfo(adapter, i);
2769*4882a593Smuzhiyun 			if (pi->nqsets > 1) {
2770*4882a593Smuzhiyun 				pi->nqsets--;
2771*4882a593Smuzhiyun 				adapter->sge.ethqsets--;
2772*4882a593Smuzhiyun 				if (adapter->sge.ethqsets <= n)
2773*4882a593Smuzhiyun 					break;
2774*4882a593Smuzhiyun 			}
2775*4882a593Smuzhiyun 		}
2776*4882a593Smuzhiyun 
2777*4882a593Smuzhiyun 	/*
2778*4882a593Smuzhiyun 	 * Reassign the starting Queue Sets for each of the "ports" ...
2779*4882a593Smuzhiyun 	 */
2780*4882a593Smuzhiyun 	n = 0;
2781*4882a593Smuzhiyun 	for_each_port(adapter, i) {
2782*4882a593Smuzhiyun 		pi = adap2pinfo(adapter, i);
2783*4882a593Smuzhiyun 		pi->first_qset = n;
2784*4882a593Smuzhiyun 		n += pi->nqsets;
2785*4882a593Smuzhiyun 	}
2786*4882a593Smuzhiyun }
2787*4882a593Smuzhiyun 
2788*4882a593Smuzhiyun /*
2789*4882a593Smuzhiyun  * We need to grab enough MSI-X vectors to cover our interrupt needs.  Ideally
2790*4882a593Smuzhiyun  * we get a separate MSI-X vector for every "Queue Set" plus any extras we
2791*4882a593Smuzhiyun  * need.  Minimally we need one for every Virtual Interface plus those needed
2792*4882a593Smuzhiyun  * for our "extras".  Note that this process may lower the maximum number of
2793*4882a593Smuzhiyun  * allowed Queue Sets ...
2794*4882a593Smuzhiyun  */
2795*4882a593Smuzhiyun static int enable_msix(struct adapter *adapter)
2796*4882a593Smuzhiyun {
2797*4882a593Smuzhiyun 	int i, want, need, nqsets;
2798*4882a593Smuzhiyun 	struct msix_entry entries[MSIX_ENTRIES];
2799*4882a593Smuzhiyun 	struct sge *s = &adapter->sge;
2800*4882a593Smuzhiyun 
2801*4882a593Smuzhiyun 	for (i = 0; i < MSIX_ENTRIES; ++i)
2802*4882a593Smuzhiyun 		entries[i].entry = i;
2803*4882a593Smuzhiyun 
2804*4882a593Smuzhiyun 	/*
2805*4882a593Smuzhiyun 	 * We _want_ enough MSI-X interrupts to cover all of our "Queue Sets"
2806*4882a593Smuzhiyun 	 * plus those needed for our "extras" (for example, the firmware
2807*4882a593Smuzhiyun 	 * message queue).  We _need_ at least one "Queue Set" per Virtual
2808*4882a593Smuzhiyun 	 * Interface plus those needed for our "extras".  So now we get to see
2809*4882a593Smuzhiyun 	 * if the song is right ...
2810*4882a593Smuzhiyun 	 */
2811*4882a593Smuzhiyun 	want = s->max_ethqsets + MSIX_EXTRAS;
2812*4882a593Smuzhiyun 	need = adapter->params.nports + MSIX_EXTRAS;
2813*4882a593Smuzhiyun 
2814*4882a593Smuzhiyun 	want = pci_enable_msix_range(adapter->pdev, entries, need, want);
2815*4882a593Smuzhiyun 	if (want < 0)
2816*4882a593Smuzhiyun 		return want;
2817*4882a593Smuzhiyun 
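	/* pci_enable_msix_range() returns the number of vectors actually
	 * granted (somewhere in [need, want]) or a negative errno; any
	 * shortfall below our "want" is absorbed by shrinking the number of
	 * Ethernet Queue Sets below.
	 */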
2818*4882a593Smuzhiyun 	nqsets = want - MSIX_EXTRAS;
2819*4882a593Smuzhiyun 	if (nqsets < s->max_ethqsets) {
2820*4882a593Smuzhiyun 		dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
2821*4882a593Smuzhiyun 			 " for %d Queue Sets\n", nqsets);
2822*4882a593Smuzhiyun 		s->max_ethqsets = nqsets;
2823*4882a593Smuzhiyun 		if (nqsets < s->ethqsets)
2824*4882a593Smuzhiyun 			reduce_ethqs(adapter, nqsets);
2825*4882a593Smuzhiyun 	}
2826*4882a593Smuzhiyun 	for (i = 0; i < want; ++i)
2827*4882a593Smuzhiyun 		adapter->msix_info[i].vec = entries[i].vector;
2828*4882a593Smuzhiyun 
2829*4882a593Smuzhiyun 	return 0;
2830*4882a593Smuzhiyun }
2831*4882a593Smuzhiyun 
2832*4882a593Smuzhiyun static const struct net_device_ops cxgb4vf_netdev_ops	= {
2833*4882a593Smuzhiyun 	.ndo_open		= cxgb4vf_open,
2834*4882a593Smuzhiyun 	.ndo_stop		= cxgb4vf_stop,
2835*4882a593Smuzhiyun 	.ndo_start_xmit		= t4vf_eth_xmit,
2836*4882a593Smuzhiyun 	.ndo_get_stats		= cxgb4vf_get_stats,
2837*4882a593Smuzhiyun 	.ndo_set_rx_mode	= cxgb4vf_set_rxmode,
2838*4882a593Smuzhiyun 	.ndo_set_mac_address	= cxgb4vf_set_mac_addr,
2839*4882a593Smuzhiyun 	.ndo_validate_addr	= eth_validate_addr,
2840*4882a593Smuzhiyun 	.ndo_do_ioctl		= cxgb4vf_do_ioctl,
2841*4882a593Smuzhiyun 	.ndo_change_mtu		= cxgb4vf_change_mtu,
2842*4882a593Smuzhiyun 	.ndo_fix_features	= cxgb4vf_fix_features,
2843*4882a593Smuzhiyun 	.ndo_set_features	= cxgb4vf_set_features,
2844*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
2845*4882a593Smuzhiyun 	.ndo_poll_controller	= cxgb4vf_poll_controller,
2846*4882a593Smuzhiyun #endif
2847*4882a593Smuzhiyun };
2848*4882a593Smuzhiyun 
2849*4882a593Smuzhiyun /**
2850*4882a593Smuzhiyun  *	cxgb4vf_get_port_mask - Get port mask for the VF based on mac
2851*4882a593Smuzhiyun  *				address stored on the adapter
2852*4882a593Smuzhiyun  *	@adapter: The adapter
2853*4882a593Smuzhiyun  *
2854*4882a593Smuzhiyun  *	Find the port mask for the VF based on the index of mac
2855*4882a593Smuzhiyun  *	address stored in the adapter. If no mac address is stored on
2856*4882a593Smuzhiyun  *	the adapter for the VF, use the port mask received from the
2857*4882a593Smuzhiyun  *	firmware.
2858*4882a593Smuzhiyun  */
2859*4882a593Smuzhiyun static unsigned int cxgb4vf_get_port_mask(struct adapter *adapter)
2860*4882a593Smuzhiyun {
2861*4882a593Smuzhiyun 	unsigned int naddr = 1, pidx = 0;
2862*4882a593Smuzhiyun 	unsigned int pmask, rmask = 0;
2863*4882a593Smuzhiyun 	u8 mac[ETH_ALEN];
2864*4882a593Smuzhiyun 	int err;
2865*4882a593Smuzhiyun 
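	/* Walk the Port Access Rights Mask bit by bit and keep only those
	 * ports for which the PF has provisioned a (non-zero) MAC address
	 * for this VF; fall back to the unfiltered mask if none was found.
	 */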
2866*4882a593Smuzhiyun 	pmask = adapter->params.vfres.pmask;
2867*4882a593Smuzhiyun 	while (pmask) {
2868*4882a593Smuzhiyun 		if (pmask & 1) {
2869*4882a593Smuzhiyun 			err = t4vf_get_vf_mac_acl(adapter, pidx, &naddr, mac);
2870*4882a593Smuzhiyun 			if (!err && !is_zero_ether_addr(mac))
2871*4882a593Smuzhiyun 				rmask |= (1 << pidx);
2872*4882a593Smuzhiyun 		}
2873*4882a593Smuzhiyun 		pmask >>= 1;
2874*4882a593Smuzhiyun 		pidx++;
2875*4882a593Smuzhiyun 	}
2876*4882a593Smuzhiyun 	if (!rmask)
2877*4882a593Smuzhiyun 		rmask = adapter->params.vfres.pmask;
2878*4882a593Smuzhiyun 
2879*4882a593Smuzhiyun 	return rmask;
2880*4882a593Smuzhiyun }
2881*4882a593Smuzhiyun 
2882*4882a593Smuzhiyun /*
2883*4882a593Smuzhiyun  * "Probe" a device: initialize a device and construct all kernel and driver
2884*4882a593Smuzhiyun  * state needed to manage the device.  This routine is called "init_one" in
2885*4882a593Smuzhiyun  * the PF Driver ...
2886*4882a593Smuzhiyun  */
2887*4882a593Smuzhiyun static int cxgb4vf_pci_probe(struct pci_dev *pdev,
2888*4882a593Smuzhiyun 			     const struct pci_device_id *ent)
2889*4882a593Smuzhiyun {
2890*4882a593Smuzhiyun 	struct adapter *adapter;
2891*4882a593Smuzhiyun 	struct net_device *netdev;
2892*4882a593Smuzhiyun 	struct port_info *pi;
2893*4882a593Smuzhiyun 	unsigned int pmask;
2894*4882a593Smuzhiyun 	int pci_using_dac;
2895*4882a593Smuzhiyun 	int err, pidx;
2896*4882a593Smuzhiyun 
2897*4882a593Smuzhiyun 	/*
2898*4882a593Smuzhiyun 	 * Initialize generic PCI device state.
2899*4882a593Smuzhiyun 	 */
2900*4882a593Smuzhiyun 	err = pci_enable_device(pdev);
2901*4882a593Smuzhiyun 	if (err) {
2902*4882a593Smuzhiyun 		dev_err(&pdev->dev, "cannot enable PCI device\n");
2903*4882a593Smuzhiyun 		return err;
2904*4882a593Smuzhiyun 	}
2905*4882a593Smuzhiyun 
2906*4882a593Smuzhiyun 	/*
2907*4882a593Smuzhiyun 	 * Reserve PCI resources for the device.  If we can't get them some
2908*4882a593Smuzhiyun 	 * other driver may have already claimed the device ...
2909*4882a593Smuzhiyun 	 */
2910*4882a593Smuzhiyun 	err = pci_request_regions(pdev, KBUILD_MODNAME);
2911*4882a593Smuzhiyun 	if (err) {
2912*4882a593Smuzhiyun 		dev_err(&pdev->dev, "cannot obtain PCI resources\n");
2913*4882a593Smuzhiyun 		goto err_disable_device;
2914*4882a593Smuzhiyun 	}
2915*4882a593Smuzhiyun 
2916*4882a593Smuzhiyun 	/*
2917*4882a593Smuzhiyun 	 * Set up our DMA mask: try for 64-bit address masking first and
2918*4882a593Smuzhiyun 	 * fall back to 32-bit if we can't get 64 bits ...
2919*4882a593Smuzhiyun 	 */
2920*4882a593Smuzhiyun 	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2921*4882a593Smuzhiyun 	if (err == 0) {
2922*4882a593Smuzhiyun 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
2923*4882a593Smuzhiyun 		if (err) {
2924*4882a593Smuzhiyun 			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for"
2925*4882a593Smuzhiyun 				" coherent allocations\n");
2926*4882a593Smuzhiyun 			goto err_release_regions;
2927*4882a593Smuzhiyun 		}
2928*4882a593Smuzhiyun 		pci_using_dac = 1;
2929*4882a593Smuzhiyun 	} else {
2930*4882a593Smuzhiyun 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2931*4882a593Smuzhiyun 		if (err != 0) {
2932*4882a593Smuzhiyun 			dev_err(&pdev->dev, "no usable DMA configuration\n");
2933*4882a593Smuzhiyun 			goto err_release_regions;
2934*4882a593Smuzhiyun 		}
2935*4882a593Smuzhiyun 		pci_using_dac = 0;
2936*4882a593Smuzhiyun 	}
2937*4882a593Smuzhiyun 
2938*4882a593Smuzhiyun 	/*
2939*4882a593Smuzhiyun 	 * Enable bus mastering for the device ...
2940*4882a593Smuzhiyun 	 */
2941*4882a593Smuzhiyun 	pci_set_master(pdev);
2942*4882a593Smuzhiyun 
2943*4882a593Smuzhiyun 	/*
2944*4882a593Smuzhiyun 	 * Allocate our adapter data structure and attach it to the device.
2945*4882a593Smuzhiyun 	 */
2946*4882a593Smuzhiyun 	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2947*4882a593Smuzhiyun 	if (!adapter) {
2948*4882a593Smuzhiyun 		err = -ENOMEM;
2949*4882a593Smuzhiyun 		goto err_release_regions;
2950*4882a593Smuzhiyun 	}
2951*4882a593Smuzhiyun 	pci_set_drvdata(pdev, adapter);
2952*4882a593Smuzhiyun 	adapter->pdev = pdev;
2953*4882a593Smuzhiyun 	adapter->pdev_dev = &pdev->dev;
2954*4882a593Smuzhiyun 
2955*4882a593Smuzhiyun 	adapter->mbox_log = kzalloc(sizeof(*adapter->mbox_log) +
2956*4882a593Smuzhiyun 				    (sizeof(struct mbox_cmd) *
2957*4882a593Smuzhiyun 				     T4VF_OS_LOG_MBOX_CMDS),
2958*4882a593Smuzhiyun 				    GFP_KERNEL);
2959*4882a593Smuzhiyun 	if (!adapter->mbox_log) {
2960*4882a593Smuzhiyun 		err = -ENOMEM;
2961*4882a593Smuzhiyun 		goto err_free_adapter;
2962*4882a593Smuzhiyun 	}
2963*4882a593Smuzhiyun 	adapter->mbox_log->size = T4VF_OS_LOG_MBOX_CMDS;
2964*4882a593Smuzhiyun 
2965*4882a593Smuzhiyun 	/*
2966*4882a593Smuzhiyun 	 * Initialize SMP data synchronization resources.
2967*4882a593Smuzhiyun 	 */
2968*4882a593Smuzhiyun 	spin_lock_init(&adapter->stats_lock);
2969*4882a593Smuzhiyun 	spin_lock_init(&adapter->mbox_lock);
2970*4882a593Smuzhiyun 	INIT_LIST_HEAD(&adapter->mlist.list);
2971*4882a593Smuzhiyun 
2972*4882a593Smuzhiyun 	/*
2973*4882a593Smuzhiyun 	 * Map our I/O registers in BAR0.
2974*4882a593Smuzhiyun 	 */
2975*4882a593Smuzhiyun 	adapter->regs = pci_ioremap_bar(pdev, 0);
2976*4882a593Smuzhiyun 	if (!adapter->regs) {
2977*4882a593Smuzhiyun 		dev_err(&pdev->dev, "cannot map device registers\n");
2978*4882a593Smuzhiyun 		err = -ENOMEM;
2979*4882a593Smuzhiyun 		goto err_free_adapter;
2980*4882a593Smuzhiyun 	}
2981*4882a593Smuzhiyun 
2982*4882a593Smuzhiyun 	/* Wait for the device to become ready before proceeding ...
2983*4882a593Smuzhiyun 	 */
2984*4882a593Smuzhiyun 	err = t4vf_prep_adapter(adapter);
2985*4882a593Smuzhiyun 	if (err) {
2986*4882a593Smuzhiyun 		dev_err(adapter->pdev_dev, "device didn't become ready:"
2987*4882a593Smuzhiyun 			" err=%d\n", err);
2988*4882a593Smuzhiyun 		goto err_unmap_bar0;
2989*4882a593Smuzhiyun 	}
2990*4882a593Smuzhiyun 
2991*4882a593Smuzhiyun 	/* For T5 and later we want to use the new BAR-based User Doorbells,
2992*4882a593Smuzhiyun 	 * so we need to map BAR2 here ...
2993*4882a593Smuzhiyun 	 */
2994*4882a593Smuzhiyun 	if (!is_t4(adapter->params.chip)) {
2995*4882a593Smuzhiyun 		adapter->bar2 = ioremap_wc(pci_resource_start(pdev, 2),
2996*4882a593Smuzhiyun 					   pci_resource_len(pdev, 2));
2997*4882a593Smuzhiyun 		if (!adapter->bar2) {
2998*4882a593Smuzhiyun 			dev_err(adapter->pdev_dev, "cannot map BAR2 doorbells\n");
2999*4882a593Smuzhiyun 			err = -ENOMEM;
3000*4882a593Smuzhiyun 			goto err_unmap_bar0;
3001*4882a593Smuzhiyun 		}
3002*4882a593Smuzhiyun 	}
3003*4882a593Smuzhiyun 	/*
3004*4882a593Smuzhiyun 	 * Initialize adapter level features.
3005*4882a593Smuzhiyun 	 */
3006*4882a593Smuzhiyun 	adapter->name = pci_name(pdev);
3007*4882a593Smuzhiyun 	adapter->msg_enable = DFLT_MSG_ENABLE;
3008*4882a593Smuzhiyun 
3009*4882a593Smuzhiyun 	/* If possible, we use PCIe Relaxed Ordering Attribute to deliver
3010*4882a593Smuzhiyun 	 * Ingress Packet Data to Free List Buffers in order to allow for
3011*4882a593Smuzhiyun 	 * chipset performance optimizations between the Root Complex and
3012*4882a593Smuzhiyun 	 * Memory Controllers.  (Messages to the associated Ingress Queue
3013*4882a593Smuzhiyun 	 * notifying new Packet Placement in the Free Lists Buffers will be
3014*4882a593Smuzhiyun 	 * sent without the Relaxed Ordering Attribute, thus guaranteeing that
3015*4882a593Smuzhiyun 	 * all preceding PCIe Transaction Layer Packets will be processed
3016*4882a593Smuzhiyun 	 * first.)  But some Root Complexes have various issues with Upstream
3017*4882a593Smuzhiyun 	 * Transaction Layer Packets with the Relaxed Ordering Attribute set.
3018*4882a593Smuzhiyun 	 * PCIe devices under such Root Complexes will have the Relaxed
3019*4882a593Smuzhiyun 	 * Ordering bit cleared in their configuration space, so we check our
3020*4882a593Smuzhiyun 	 * PCIe configuration space to see if it's flagged with advice against
3021*4882a593Smuzhiyun 	 * using Relaxed Ordering.
3022*4882a593Smuzhiyun 	 */
3023*4882a593Smuzhiyun 	if (!pcie_relaxed_ordering_enabled(pdev))
3024*4882a593Smuzhiyun 		adapter->flags |= CXGB4VF_ROOT_NO_RELAXED_ORDERING;
3025*4882a593Smuzhiyun 
3026*4882a593Smuzhiyun 	err = adap_init0(adapter);
3027*4882a593Smuzhiyun 	if (err)
3028*4882a593Smuzhiyun 		dev_err(&pdev->dev,
3029*4882a593Smuzhiyun 			"Adapter initialization failed, error %d. Continuing in debug mode\n",
3030*4882a593Smuzhiyun 			err);
3031*4882a593Smuzhiyun 
3032*4882a593Smuzhiyun 	/* Initialize hash mac addr list */
3033*4882a593Smuzhiyun 	INIT_LIST_HEAD(&adapter->mac_hlist);
3034*4882a593Smuzhiyun 
3035*4882a593Smuzhiyun 	/*
3036*4882a593Smuzhiyun 	 * Allocate our "adapter ports" and stitch everything together.
3037*4882a593Smuzhiyun 	 */
3038*4882a593Smuzhiyun 	pmask = cxgb4vf_get_port_mask(adapter);
3039*4882a593Smuzhiyun 	for_each_port(adapter, pidx) {
3040*4882a593Smuzhiyun 		int port_id, viid;
3041*4882a593Smuzhiyun 		u8 mac[ETH_ALEN];
3042*4882a593Smuzhiyun 		unsigned int naddr = 1;
3043*4882a593Smuzhiyun 
3044*4882a593Smuzhiyun 		/*
3045*4882a593Smuzhiyun 		 * We simplistically allocate our virtual interfaces
3046*4882a593Smuzhiyun 		 * sequentially across the port numbers to which we have
3047*4882a593Smuzhiyun 		 * access rights.  This should be configurable in some manner
3048*4882a593Smuzhiyun 		 * ...
3049*4882a593Smuzhiyun 		 */
3050*4882a593Smuzhiyun 		if (pmask == 0)
3051*4882a593Smuzhiyun 			break;
3052*4882a593Smuzhiyun 		port_id = ffs(pmask) - 1;
3053*4882a593Smuzhiyun 		pmask &= ~(1 << port_id);
3054*4882a593Smuzhiyun 
3055*4882a593Smuzhiyun 		/*
3056*4882a593Smuzhiyun 		 * Allocate our network device and stitch things together.
3057*4882a593Smuzhiyun 		 */
3058*4882a593Smuzhiyun 		netdev = alloc_etherdev_mq(sizeof(struct port_info),
3059*4882a593Smuzhiyun 					   MAX_PORT_QSETS);
3060*4882a593Smuzhiyun 		if (netdev == NULL) {
3061*4882a593Smuzhiyun 			err = -ENOMEM;
3062*4882a593Smuzhiyun 			goto err_free_dev;
3063*4882a593Smuzhiyun 		}
3064*4882a593Smuzhiyun 		adapter->port[pidx] = netdev;
3065*4882a593Smuzhiyun 		SET_NETDEV_DEV(netdev, &pdev->dev);
3066*4882a593Smuzhiyun 		pi = netdev_priv(netdev);
3067*4882a593Smuzhiyun 		pi->adapter = adapter;
3068*4882a593Smuzhiyun 		pi->pidx = pidx;
3069*4882a593Smuzhiyun 		pi->port_id = port_id;
3070*4882a593Smuzhiyun 
3071*4882a593Smuzhiyun 		/*
3072*4882a593Smuzhiyun 		 * Initialize the starting state of our "port" and register
3073*4882a593Smuzhiyun 		 * it.
3074*4882a593Smuzhiyun 		 */
3075*4882a593Smuzhiyun 		pi->xact_addr_filt = -1;
3076*4882a593Smuzhiyun 		netdev->irq = pdev->irq;
3077*4882a593Smuzhiyun 
3078*4882a593Smuzhiyun 		netdev->hw_features = NETIF_F_SG | TSO_FLAGS | NETIF_F_GRO |
3079*4882a593Smuzhiyun 			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3080*4882a593Smuzhiyun 			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
3081*4882a593Smuzhiyun 		netdev->features = netdev->hw_features;
3082*4882a593Smuzhiyun 		if (pci_using_dac)
3083*4882a593Smuzhiyun 			netdev->features |= NETIF_F_HIGHDMA;
3084*4882a593Smuzhiyun 		netdev->vlan_features = netdev->features & VLAN_FEAT;
3085*4882a593Smuzhiyun 
3086*4882a593Smuzhiyun 		netdev->priv_flags |= IFF_UNICAST_FLT;
3087*4882a593Smuzhiyun 		netdev->min_mtu = 81;
3088*4882a593Smuzhiyun 		netdev->max_mtu = ETH_MAX_MTU;
3089*4882a593Smuzhiyun 
3090*4882a593Smuzhiyun 		netdev->netdev_ops = &cxgb4vf_netdev_ops;
3091*4882a593Smuzhiyun 		netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
3092*4882a593Smuzhiyun 		netdev->dev_port = pi->port_id;
3093*4882a593Smuzhiyun 
3094*4882a593Smuzhiyun 		/*
3095*4882a593Smuzhiyun 		 * If we haven't been able to contact the firmware, there's
3096*4882a593Smuzhiyun 		 * nothing else we can do for this "port" ...
3097*4882a593Smuzhiyun 		 */
3098*4882a593Smuzhiyun 		if (!(adapter->flags & CXGB4VF_FW_OK))
3099*4882a593Smuzhiyun 			continue;
3100*4882a593Smuzhiyun 
3101*4882a593Smuzhiyun 		viid = t4vf_alloc_vi(adapter, port_id);
3102*4882a593Smuzhiyun 		if (viid < 0) {
3103*4882a593Smuzhiyun 			dev_err(&pdev->dev,
3104*4882a593Smuzhiyun 				"cannot allocate VI for port %d: err=%d\n",
3105*4882a593Smuzhiyun 				port_id, viid);
3106*4882a593Smuzhiyun 			err = viid;
3107*4882a593Smuzhiyun 			goto err_free_dev;
3108*4882a593Smuzhiyun 		}
3109*4882a593Smuzhiyun 		pi->viid = viid;
3110*4882a593Smuzhiyun 
3111*4882a593Smuzhiyun 		/*
3112*4882a593Smuzhiyun 		 * Initialize the hardware/software state for the port.
3113*4882a593Smuzhiyun 		 */
3114*4882a593Smuzhiyun 		err = t4vf_port_init(adapter, pidx);
3115*4882a593Smuzhiyun 		if (err) {
3116*4882a593Smuzhiyun 			dev_err(&pdev->dev, "cannot initialize port %d\n",
3117*4882a593Smuzhiyun 				pidx);
3118*4882a593Smuzhiyun 			goto err_free_dev;
3119*4882a593Smuzhiyun 		}
3120*4882a593Smuzhiyun 
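		/* If the PF has provisioned a MAC address for this VF and we
		 * own exactly one Virtual Interface, adopt that address in
		 * place of the one picked during VI allocation.
		 */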
3121*4882a593Smuzhiyun 		err = t4vf_get_vf_mac_acl(adapter, port_id, &naddr, mac);
3122*4882a593Smuzhiyun 		if (err) {
3123*4882a593Smuzhiyun 			dev_err(&pdev->dev,
3124*4882a593Smuzhiyun 				"unable to determine MAC ACL address, "
3125*4882a593Smuzhiyun 				"continuing anyway.. (status %d)\n", err);
3126*4882a593Smuzhiyun 		} else if (naddr && adapter->params.vfres.nvi == 1) {
3127*4882a593Smuzhiyun 			struct sockaddr addr;
3128*4882a593Smuzhiyun 
3129*4882a593Smuzhiyun 			ether_addr_copy(addr.sa_data, mac);
3130*4882a593Smuzhiyun 			err = cxgb4vf_set_mac_addr(netdev, &addr);
3131*4882a593Smuzhiyun 			if (err) {
3132*4882a593Smuzhiyun 				dev_err(&pdev->dev,
3133*4882a593Smuzhiyun 					"unable to set MAC address %pM\n",
3134*4882a593Smuzhiyun 					mac);
3135*4882a593Smuzhiyun 				goto err_free_dev;
3136*4882a593Smuzhiyun 			}
3137*4882a593Smuzhiyun 			dev_info(&pdev->dev,
3138*4882a593Smuzhiyun 				 "Using assigned MAC ACL: %pM\n", mac);
3139*4882a593Smuzhiyun 		}
3140*4882a593Smuzhiyun 	}
3141*4882a593Smuzhiyun 
3142*4882a593Smuzhiyun 	/* See what interrupts we'll be using.  If we've been configured to
3143*4882a593Smuzhiyun 	 * use MSI-X interrupts, try to enable them but fall back to using
3144*4882a593Smuzhiyun 	 * MSI interrupts if we can't enable MSI-X interrupts.  If we can't
3145*4882a593Smuzhiyun 	 * get MSI interrupts we bail with the error.
3146*4882a593Smuzhiyun 	 */
3147*4882a593Smuzhiyun 	if (msi == MSI_MSIX && enable_msix(adapter) == 0)
3148*4882a593Smuzhiyun 		adapter->flags |= CXGB4VF_USING_MSIX;
3149*4882a593Smuzhiyun 	else {
3150*4882a593Smuzhiyun 		if (msi == MSI_MSIX) {
3151*4882a593Smuzhiyun 			dev_info(adapter->pdev_dev,
3152*4882a593Smuzhiyun 				 "Unable to use MSI-X Interrupts; falling "
3153*4882a593Smuzhiyun 				 "back to MSI Interrupts\n");
3154*4882a593Smuzhiyun 
3155*4882a593Smuzhiyun 			/* We're going to need a Forwarded Interrupt Queue so
3156*4882a593Smuzhiyun 			 * that may cut into how many Queue Sets we can
3157*4882a593Smuzhiyun 			 * support.
3158*4882a593Smuzhiyun 			 */
3159*4882a593Smuzhiyun 			msi = MSI_MSI;
3160*4882a593Smuzhiyun 			size_nports_qsets(adapter);
3161*4882a593Smuzhiyun 		}
3162*4882a593Smuzhiyun 		err = pci_enable_msi(pdev);
3163*4882a593Smuzhiyun 		if (err) {
3164*4882a593Smuzhiyun 			dev_err(&pdev->dev, "Unable to allocate MSI Interrupts;"
3165*4882a593Smuzhiyun 				" err=%d\n", err);
3166*4882a593Smuzhiyun 			goto err_free_dev;
3167*4882a593Smuzhiyun 		}
3168*4882a593Smuzhiyun 		adapter->flags |= CXGB4VF_USING_MSI;
3169*4882a593Smuzhiyun 	}
3170*4882a593Smuzhiyun 
3171*4882a593Smuzhiyun 	/* Now that we know how many "ports" we have and what interrupt
3172*4882a593Smuzhiyun 	 * mechanism we're going to use, we can configure our queue resources.
3173*4882a593Smuzhiyun 	 */
3174*4882a593Smuzhiyun 	cfg_queues(adapter);
3175*4882a593Smuzhiyun 
3176*4882a593Smuzhiyun 	/*
3177*4882a593Smuzhiyun 	 * The "card" is now ready to go.  If any errors occur during device
3178*4882a593Smuzhiyun 	 * registration we do not fail the whole "card" but rather proceed
3179*4882a593Smuzhiyun 	 * only with the ports we manage to register successfully.  However we
3180*4882a593Smuzhiyun 	 * must register at least one net device.
3181*4882a593Smuzhiyun 	 */
3182*4882a593Smuzhiyun 	for_each_port(adapter, pidx) {
3183*4882a593Smuzhiyun 		struct port_info *pi = netdev_priv(adapter->port[pidx]);
3184*4882a593Smuzhiyun 		netdev = adapter->port[pidx];
3185*4882a593Smuzhiyun 		if (netdev == NULL)
3186*4882a593Smuzhiyun 			continue;
3187*4882a593Smuzhiyun 
3188*4882a593Smuzhiyun 		netif_set_real_num_tx_queues(netdev, pi->nqsets);
3189*4882a593Smuzhiyun 		netif_set_real_num_rx_queues(netdev, pi->nqsets);
3190*4882a593Smuzhiyun 
3191*4882a593Smuzhiyun 		err = register_netdev(netdev);
3192*4882a593Smuzhiyun 		if (err) {
3193*4882a593Smuzhiyun 			dev_warn(&pdev->dev, "cannot register net device %s,"
3194*4882a593Smuzhiyun 				 " skipping\n", netdev->name);
3195*4882a593Smuzhiyun 			continue;
3196*4882a593Smuzhiyun 		}
3197*4882a593Smuzhiyun 
3198*4882a593Smuzhiyun 		netif_carrier_off(netdev);
3199*4882a593Smuzhiyun 		set_bit(pidx, &adapter->registered_device_map);
3200*4882a593Smuzhiyun 	}
3201*4882a593Smuzhiyun 	if (adapter->registered_device_map == 0) {
3202*4882a593Smuzhiyun 		dev_err(&pdev->dev, "could not register any net devices\n");
3203*4882a593Smuzhiyun 		goto err_disable_interrupts;
3204*4882a593Smuzhiyun 	}
3205*4882a593Smuzhiyun 
3206*4882a593Smuzhiyun 	/*
3207*4882a593Smuzhiyun 	 * Set up our debugfs entries.
3208*4882a593Smuzhiyun 	 */
3209*4882a593Smuzhiyun 	if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) {
3210*4882a593Smuzhiyun 		adapter->debugfs_root =
3211*4882a593Smuzhiyun 			debugfs_create_dir(pci_name(pdev),
3212*4882a593Smuzhiyun 					   cxgb4vf_debugfs_root);
3213*4882a593Smuzhiyun 		setup_debugfs(adapter);
3214*4882a593Smuzhiyun 	}
3215*4882a593Smuzhiyun 
3216*4882a593Smuzhiyun 	/*
3217*4882a593Smuzhiyun 	 * Print a short notice on the existence and configuration of the new
3218*4882a593Smuzhiyun 	 * VF network device ...
3219*4882a593Smuzhiyun 	 */
3220*4882a593Smuzhiyun 	for_each_port(adapter, pidx) {
3221*4882a593Smuzhiyun 		dev_info(adapter->pdev_dev, "%s: Chelsio VF NIC PCIe %s\n",
3222*4882a593Smuzhiyun 			 adapter->port[pidx]->name,
3223*4882a593Smuzhiyun 			 (adapter->flags & CXGB4VF_USING_MSIX) ? "MSI-X" :
3224*4882a593Smuzhiyun 			 (adapter->flags & CXGB4VF_USING_MSI)  ? "MSI" : "");
3225*4882a593Smuzhiyun 	}
3226*4882a593Smuzhiyun 
3227*4882a593Smuzhiyun 	/*
3228*4882a593Smuzhiyun 	 * Return success!
3229*4882a593Smuzhiyun 	 */
3230*4882a593Smuzhiyun 	return 0;
3231*4882a593Smuzhiyun 
3232*4882a593Smuzhiyun 	/*
3233*4882a593Smuzhiyun 	 * Error recovery and exit code.  Unwind state that's been created
3234*4882a593Smuzhiyun 	 * so far and return the error.
3235*4882a593Smuzhiyun 	 */
3236*4882a593Smuzhiyun err_disable_interrupts:
3237*4882a593Smuzhiyun 	if (adapter->flags & CXGB4VF_USING_MSIX) {
3238*4882a593Smuzhiyun 		pci_disable_msix(adapter->pdev);
3239*4882a593Smuzhiyun 		adapter->flags &= ~CXGB4VF_USING_MSIX;
3240*4882a593Smuzhiyun 	} else if (adapter->flags & CXGB4VF_USING_MSI) {
3241*4882a593Smuzhiyun 		pci_disable_msi(adapter->pdev);
3242*4882a593Smuzhiyun 		adapter->flags &= ~CXGB4VF_USING_MSI;
3243*4882a593Smuzhiyun 	}
3244*4882a593Smuzhiyun 
3245*4882a593Smuzhiyun err_free_dev:
3246*4882a593Smuzhiyun 	for_each_port(adapter, pidx) {
3247*4882a593Smuzhiyun 		netdev = adapter->port[pidx];
3248*4882a593Smuzhiyun 		if (netdev == NULL)
3249*4882a593Smuzhiyun 			continue;
3250*4882a593Smuzhiyun 		pi = netdev_priv(netdev);
3251*4882a593Smuzhiyun 		if (pi->viid)
3252*4882a593Smuzhiyun 			t4vf_free_vi(adapter, pi->viid);
3253*4882a593Smuzhiyun 		if (test_bit(pidx, &adapter->registered_device_map))
3254*4882a593Smuzhiyun 			unregister_netdev(netdev);
3255*4882a593Smuzhiyun 		free_netdev(netdev);
3256*4882a593Smuzhiyun 	}
3257*4882a593Smuzhiyun 
3258*4882a593Smuzhiyun 	if (!is_t4(adapter->params.chip))
3259*4882a593Smuzhiyun 		iounmap(adapter->bar2);
3260*4882a593Smuzhiyun 
3261*4882a593Smuzhiyun err_unmap_bar0:
3262*4882a593Smuzhiyun 	iounmap(adapter->regs);
3263*4882a593Smuzhiyun 
3264*4882a593Smuzhiyun err_free_adapter:
3265*4882a593Smuzhiyun 	kfree(adapter->mbox_log);
3266*4882a593Smuzhiyun 	kfree(adapter);
3267*4882a593Smuzhiyun 
3268*4882a593Smuzhiyun err_release_regions:
3269*4882a593Smuzhiyun 	pci_release_regions(pdev);
3270*4882a593Smuzhiyun 	pci_clear_master(pdev);
3271*4882a593Smuzhiyun 
3272*4882a593Smuzhiyun err_disable_device:
3273*4882a593Smuzhiyun 	pci_disable_device(pdev);
3274*4882a593Smuzhiyun 
3275*4882a593Smuzhiyun 	return err;
3276*4882a593Smuzhiyun }
3277*4882a593Smuzhiyun 
3278*4882a593Smuzhiyun /*
3279*4882a593Smuzhiyun  * "Remove" a device: tear down all kernel and driver state created in the
3280*4882a593Smuzhiyun  * "probe" routine and quiesce the device (disable interrupts, etc.).  (Note
3281*4882a593Smuzhiyun  * that this is called "remove_one" in the PF Driver.)
3282*4882a593Smuzhiyun  */
3283*4882a593Smuzhiyun static void cxgb4vf_pci_remove(struct pci_dev *pdev)
3284*4882a593Smuzhiyun {
3285*4882a593Smuzhiyun 	struct adapter *adapter = pci_get_drvdata(pdev);
3286*4882a593Smuzhiyun 	struct hash_mac_addr *entry, *tmp;
3287*4882a593Smuzhiyun 
3288*4882a593Smuzhiyun 	/*
3289*4882a593Smuzhiyun 	 * Tear down driver state associated with device.
3290*4882a593Smuzhiyun 	 */
3291*4882a593Smuzhiyun 	if (adapter) {
3292*4882a593Smuzhiyun 		int pidx;
3293*4882a593Smuzhiyun 
3294*4882a593Smuzhiyun 		/*
3295*4882a593Smuzhiyun 		 * Stop all of our activity.  Unregister the network ports,
3296*4882a593Smuzhiyun 		 * disable interrupts, etc.
3297*4882a593Smuzhiyun 		 */
3298*4882a593Smuzhiyun 		for_each_port(adapter, pidx)
3299*4882a593Smuzhiyun 			if (test_bit(pidx, &adapter->registered_device_map))
3300*4882a593Smuzhiyun 				unregister_netdev(adapter->port[pidx]);
3301*4882a593Smuzhiyun 		t4vf_sge_stop(adapter);
3302*4882a593Smuzhiyun 		if (adapter->flags & CXGB4VF_USING_MSIX) {
3303*4882a593Smuzhiyun 			pci_disable_msix(adapter->pdev);
3304*4882a593Smuzhiyun 			adapter->flags &= ~CXGB4VF_USING_MSIX;
3305*4882a593Smuzhiyun 		} else if (adapter->flags & CXGB4VF_USING_MSI) {
3306*4882a593Smuzhiyun 			pci_disable_msi(adapter->pdev);
3307*4882a593Smuzhiyun 			adapter->flags &= ~CXGB4VF_USING_MSI;
3308*4882a593Smuzhiyun 		}
3309*4882a593Smuzhiyun 
3310*4882a593Smuzhiyun 		/*
3311*4882a593Smuzhiyun 		 * Tear down our debugfs entries.
3312*4882a593Smuzhiyun 		 */
3313*4882a593Smuzhiyun 		if (!IS_ERR_OR_NULL(adapter->debugfs_root)) {
3314*4882a593Smuzhiyun 			cleanup_debugfs(adapter);
3315*4882a593Smuzhiyun 			debugfs_remove_recursive(adapter->debugfs_root);
3316*4882a593Smuzhiyun 		}
3317*4882a593Smuzhiyun 
3318*4882a593Smuzhiyun 		/*
3319*4882a593Smuzhiyun 		 * Free all of the various resources which we've acquired ...
3320*4882a593Smuzhiyun 		 */
3321*4882a593Smuzhiyun 		t4vf_free_sge_resources(adapter);
3322*4882a593Smuzhiyun 		for_each_port(adapter, pidx) {
3323*4882a593Smuzhiyun 			struct net_device *netdev = adapter->port[pidx];
3324*4882a593Smuzhiyun 			struct port_info *pi;
3325*4882a593Smuzhiyun 
3326*4882a593Smuzhiyun 			if (netdev == NULL)
3327*4882a593Smuzhiyun 				continue;
3328*4882a593Smuzhiyun 
3329*4882a593Smuzhiyun 			pi = netdev_priv(netdev);
3330*4882a593Smuzhiyun 			if (pi->viid)
3331*4882a593Smuzhiyun 				t4vf_free_vi(adapter, pi->viid);
3332*4882a593Smuzhiyun 			free_netdev(netdev);
3333*4882a593Smuzhiyun 		}
3334*4882a593Smuzhiyun 		iounmap(adapter->regs);
3335*4882a593Smuzhiyun 		if (!is_t4(adapter->params.chip))
3336*4882a593Smuzhiyun 			iounmap(adapter->bar2);
3337*4882a593Smuzhiyun 		kfree(adapter->mbox_log);
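		/* Release the entries on the adapter's MAC-address hash list
		 * (struct hash_mac_addr) built up while the interface was in
		 * use.
		 */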
3338*4882a593Smuzhiyun 		list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist,
3339*4882a593Smuzhiyun 					 list) {
3340*4882a593Smuzhiyun 			list_del(&entry->list);
3341*4882a593Smuzhiyun 			kfree(entry);
3342*4882a593Smuzhiyun 		}
3343*4882a593Smuzhiyun 		kfree(adapter);
3344*4882a593Smuzhiyun 	}
3345*4882a593Smuzhiyun 
3346*4882a593Smuzhiyun 	/*
3347*4882a593Smuzhiyun 	 * Disable the device and release its PCI resources.
3348*4882a593Smuzhiyun 	 */
3349*4882a593Smuzhiyun 	pci_disable_device(pdev);
3350*4882a593Smuzhiyun 	pci_clear_master(pdev);
3351*4882a593Smuzhiyun 	pci_release_regions(pdev);
3352*4882a593Smuzhiyun }
3353*4882a593Smuzhiyun 
3354*4882a593Smuzhiyun /*
3355*4882a593Smuzhiyun  * "Shutdown" the device: quiesce it, stopping Ingress Packet and
3356*4882a593Smuzhiyun  * Interrupt delivery.
3357*4882a593Smuzhiyun  */
3358*4882a593Smuzhiyun static void cxgb4vf_pci_shutdown(struct pci_dev *pdev)
3359*4882a593Smuzhiyun {
3360*4882a593Smuzhiyun 	struct adapter *adapter;
3361*4882a593Smuzhiyun 	int pidx;
3362*4882a593Smuzhiyun 
3363*4882a593Smuzhiyun 	adapter = pci_get_drvdata(pdev);
3364*4882a593Smuzhiyun 	if (!adapter)
3365*4882a593Smuzhiyun 		return;
3366*4882a593Smuzhiyun 
3367*4882a593Smuzhiyun 	/* Disable all Virtual Interfaces.  This will shut down the
3368*4882a593Smuzhiyun 	 * delivery of all ingress packets into the chip for these
3369*4882a593Smuzhiyun 	 * Virtual Interfaces.
3370*4882a593Smuzhiyun 	 */
3371*4882a593Smuzhiyun 	for_each_port(adapter, pidx)
3372*4882a593Smuzhiyun 		if (test_bit(pidx, &adapter->registered_device_map))
3373*4882a593Smuzhiyun 			unregister_netdev(adapter->port[pidx]);
3374*4882a593Smuzhiyun 
3375*4882a593Smuzhiyun 	/* Stop all Queue processing and disable our interrupts so that no
3376*4882a593Smuzhiyun 	 * further ingress work or interrupts are delivered to the host.
3377*4882a593Smuzhiyun 	 */
3378*4882a593Smuzhiyun 	t4vf_sge_stop(adapter);
3379*4882a593Smuzhiyun 	if (adapter->flags & CXGB4VF_USING_MSIX) {
3380*4882a593Smuzhiyun 		pci_disable_msix(adapter->pdev);
3381*4882a593Smuzhiyun 		adapter->flags &= ~CXGB4VF_USING_MSIX;
3382*4882a593Smuzhiyun 	} else if (adapter->flags & CXGB4VF_USING_MSI) {
3383*4882a593Smuzhiyun 		pci_disable_msi(adapter->pdev);
3384*4882a593Smuzhiyun 		adapter->flags &= ~CXGB4VF_USING_MSI;
3385*4882a593Smuzhiyun 	}
3386*4882a593Smuzhiyun 
3387*4882a593Smuzhiyun 	/*
3388*4882a593Smuzhiyun 	 * Free up all Queues.  This will prevent further DMA and
3389*4882a593Smuzhiyun 	 * Interrupts, allowing various internal pathways to drain.
3390*4882a593Smuzhiyun 	 */
3391*4882a593Smuzhiyun 	t4vf_free_sge_resources(adapter);
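	/* Clear our driver data so that a subsequent ->remove() sees no
	 * adapter state left to tear down.
	 */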
3392*4882a593Smuzhiyun 	pci_set_drvdata(pdev, NULL);
3393*4882a593Smuzhiyun }
3394*4882a593Smuzhiyun 
3395*4882a593Smuzhiyun /* Macros needed to support the PCI Device ID Table ...
3396*4882a593Smuzhiyun  */
3397*4882a593Smuzhiyun #define CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN \
3398*4882a593Smuzhiyun 	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
3399*4882a593Smuzhiyun #define CH_PCI_DEVICE_ID_FUNCTION	0x8
3400*4882a593Smuzhiyun 
3401*4882a593Smuzhiyun #define CH_PCI_ID_TABLE_ENTRY(devid) \
3402*4882a593Smuzhiyun 		{ PCI_VDEVICE(CHELSIO, (devid)), 0 }
3403*4882a593Smuzhiyun 
3404*4882a593Smuzhiyun #define CH_PCI_DEVICE_ID_TABLE_DEFINE_END { 0, } }
3405*4882a593Smuzhiyun 
3406*4882a593Smuzhiyun #include "../cxgb4/t4_pci_id_tbl.h"
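/*
 * For illustration only -- a sketch of what the include above expands to,
 * not the literal contents of t4_pci_id_tbl.h.  With the macros defined
 * above, the result is roughly:
 *
 *	static const struct pci_device_id cxgb4vf_pci_tbl[] = {
 *		{ PCI_VDEVICE(CHELSIO, devid), 0 },
 *		...one such entry per supported T4/T5/T6 VF device ID...
 *		{ 0, }
 *	};
 *
 * with the zeroed entry terminating the table.  CH_PCI_DEVICE_ID_FUNCTION
 * (0x8) tells t4_pci_id_tbl.h to emit the Virtual Function variants of
 * those device IDs.
 */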
3407*4882a593Smuzhiyun 
3408*4882a593Smuzhiyun MODULE_DESCRIPTION(DRV_DESC);
3409*4882a593Smuzhiyun MODULE_AUTHOR("Chelsio Communications");
3410*4882a593Smuzhiyun MODULE_LICENSE("Dual BSD/GPL");
3411*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, cxgb4vf_pci_tbl);
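/*
 * MODULE_DEVICE_TABLE() exports cxgb4vf_pci_tbl in the module's alias
 * information so that udev/modprobe can auto-load this driver when a
 * matching VF device shows up.
 */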
3412*4882a593Smuzhiyun 
3413*4882a593Smuzhiyun static struct pci_driver cxgb4vf_driver = {
3414*4882a593Smuzhiyun 	.name		= KBUILD_MODNAME,
3415*4882a593Smuzhiyun 	.id_table	= cxgb4vf_pci_tbl,
3416*4882a593Smuzhiyun 	.probe		= cxgb4vf_pci_probe,
3417*4882a593Smuzhiyun 	.remove		= cxgb4vf_pci_remove,
3418*4882a593Smuzhiyun 	.shutdown	= cxgb4vf_pci_shutdown,
3419*4882a593Smuzhiyun };
3420*4882a593Smuzhiyun 
3421*4882a593Smuzhiyun /*
3422*4882a593Smuzhiyun  * Initialize global driver state.
3423*4882a593Smuzhiyun  */
3424*4882a593Smuzhiyun static int __init cxgb4vf_module_init(void)
3425*4882a593Smuzhiyun {
3426*4882a593Smuzhiyun 	int ret;
3427*4882a593Smuzhiyun 
3428*4882a593Smuzhiyun 	/*
3429*4882a593Smuzhiyun 	 * Vet our module parameters.
3430*4882a593Smuzhiyun 	 */
3431*4882a593Smuzhiyun 	if (msi != MSI_MSIX && msi != MSI_MSI) {
3432*4882a593Smuzhiyun 		pr_warn("bad module parameter msi=%d; must be %d (MSI-X or MSI) or %d (MSI)\n",
3433*4882a593Smuzhiyun 			msi, MSI_MSIX, MSI_MSI);
3434*4882a593Smuzhiyun 		return -EINVAL;
3435*4882a593Smuzhiyun 	}
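	/*
	 * For example, loading the module with msi set to the MSI_MSI value
	 * (see the module parameter definitions earlier in this file)
	 * restricts the driver to plain MSI, while the MSI_MSIX value lets
	 * it use MSI-X or MSI.
	 */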
3436*4882a593Smuzhiyun 
3437*4882a593Smuzhiyun 	/* Debugfs support is optional, debugfs will warn if this fails */
3438*4882a593Smuzhiyun 	cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
3439*4882a593Smuzhiyun 
3440*4882a593Smuzhiyun 	ret = pci_register_driver(&cxgb4vf_driver);
3441*4882a593Smuzhiyun 	if (ret < 0)
3442*4882a593Smuzhiyun 		debugfs_remove(cxgb4vf_debugfs_root);
3443*4882a593Smuzhiyun 	return ret;
3444*4882a593Smuzhiyun }
3445*4882a593Smuzhiyun 
3446*4882a593Smuzhiyun /*
3447*4882a593Smuzhiyun  * Tear down global driver state.
3448*4882a593Smuzhiyun  */
3449*4882a593Smuzhiyun static void __exit cxgb4vf_module_exit(void)
3450*4882a593Smuzhiyun {
3451*4882a593Smuzhiyun 	pci_unregister_driver(&cxgb4vf_driver);
3452*4882a593Smuzhiyun 	debugfs_remove(cxgb4vf_debugfs_root);
3453*4882a593Smuzhiyun }
3454*4882a593Smuzhiyun 
3455*4882a593Smuzhiyun module_init(cxgb4vf_module_init);
3456*4882a593Smuzhiyun module_exit(cxgb4vf_module_exit);
3457