xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/brocade/bna/bnad_ethtool.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Linux network driver for QLogic BR-series Converged Network Adapter.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun /*
6*4882a593Smuzhiyun  * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
7*4882a593Smuzhiyun  * Copyright (c) 2014-2015 QLogic Corporation
8*4882a593Smuzhiyun  * All rights reserved
9*4882a593Smuzhiyun  * www.qlogic.com
10*4882a593Smuzhiyun  */
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include "cna.h"
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #include <linux/netdevice.h>
15*4882a593Smuzhiyun #include <linux/skbuff.h>
16*4882a593Smuzhiyun #include <linux/ethtool.h>
17*4882a593Smuzhiyun #include <linux/rtnetlink.h>
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include "bna.h"
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #include "bnad.h"
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #define BNAD_NUM_TXF_COUNTERS 12
24*4882a593Smuzhiyun #define BNAD_NUM_RXF_COUNTERS 10
25*4882a593Smuzhiyun #define BNAD_NUM_CQ_COUNTERS (3 + 5)
26*4882a593Smuzhiyun #define BNAD_NUM_RXQ_COUNTERS 7
27*4882a593Smuzhiyun #define BNAD_NUM_TXQ_COUNTERS 5
28*4882a593Smuzhiyun 
/*
 * Ethtool stat names, reported by bnad_get_strings(ETH_SS_STATS).
 * The order here must match the order in which bnad_get_ethtool_stats()
 * fills in the values.  These names are userspace-visible ABI (parsed by
 * "ethtool -S" consumers) — do not rename entries.
 */
static const char *bnad_net_stats_strings[] = {
	/* Standard netdev counters (rtnl_link_stats64 subset) */
	"rx_packets",
	"tx_packets",
	"rx_bytes",
	"tx_bytes",
	"rx_errors",
	"tx_errors",
	"rx_dropped",
	"tx_dropped",
	"multicast",
	"collisions",
	"rx_length_errors",
	"rx_crc_errors",
	"rx_frame_errors",
	"tx_fifo_errors",

	/* Driver-level counters (queue state, TSO/checksum offload,
	 * tx skb validation failures)
	 */
	"netif_queue_stop",
	"netif_queue_wakeup",
	"netif_queue_stopped",
	"tso4",
	"tso6",
	"tso_err",
	"tcpcsum_offload",
	"udpcsum_offload",
	"csum_help",
	"tx_skb_too_short",
	"tx_skb_stopping",
	"tx_skb_max_vectors",
	"tx_skb_mss_too_long",
	"tx_skb_tso_too_short",
	"tx_skb_tso_prepare",
	"tx_skb_non_tso_too_long",
	"tx_skb_tcp_hdr",
	"tx_skb_udp_hdr",
	"tx_skb_csum_err",
	"tx_skb_headlen_too_long",
	"tx_skb_headlen_zero",
	"tx_skb_frag_zero",
	"tx_skb_len_mismatch",
	"tx_skb_map_failed",
	"hw_stats_updates",
	"netif_rx_dropped",

	"link_toggle",
	"cee_toggle",

	/* Resource allocation / mailbox counters */
	"rxp_info_alloc_failed",
	"mbox_intr_disabled",
	"mbox_intr_enabled",
	"tx_unmap_q_alloc_failed",
	"rx_unmap_q_alloc_failed",
	"rxbuf_alloc_failed",
	"rxbuf_map_failed",

	/* MAC hardware counters */
	"mac_stats_clr_cnt",
	"mac_frame_64",
	"mac_frame_65_127",
	"mac_frame_128_255",
	"mac_frame_256_511",
	"mac_frame_512_1023",
	"mac_frame_1024_1518",
	"mac_frame_1518_1522",
	"mac_rx_bytes",
	"mac_rx_packets",
	"mac_rx_fcs_error",
	"mac_rx_multicast",
	"mac_rx_broadcast",
	"mac_rx_control_frames",
	"mac_rx_pause",
	"mac_rx_unknown_opcode",
	"mac_rx_alignment_error",
	"mac_rx_frame_length_error",
	"mac_rx_code_error",
	"mac_rx_carrier_sense_error",
	"mac_rx_undersize",
	"mac_rx_oversize",
	"mac_rx_fragments",
	"mac_rx_jabber",
	"mac_rx_drop",

	"mac_tx_bytes",
	"mac_tx_packets",
	"mac_tx_multicast",
	"mac_tx_broadcast",
	"mac_tx_pause",
	"mac_tx_deferral",
	"mac_tx_excessive_deferral",
	"mac_tx_single_collision",
	/* NOTE(review): "muliple" is a long-standing typo, but it is
	 * exported ABI — fixing the spelling would break existing scripts.
	 */
	"mac_tx_muliple_collision",
	"mac_tx_late_collision",
	"mac_tx_excessive_collision",
	"mac_tx_total_collision",
	"mac_tx_pause_honored",
	"mac_tx_drop",
	"mac_tx_jabber",
	"mac_tx_fcs_error",
	"mac_tx_control_frame",
	"mac_tx_oversize",
	"mac_tx_undersize",
	"mac_tx_fragments",

	/* Per-priority (0-7) pause counters, tx direction */
	"bpc_tx_pause_0",
	"bpc_tx_pause_1",
	"bpc_tx_pause_2",
	"bpc_tx_pause_3",
	"bpc_tx_pause_4",
	"bpc_tx_pause_5",
	"bpc_tx_pause_6",
	"bpc_tx_pause_7",
	"bpc_tx_zero_pause_0",
	"bpc_tx_zero_pause_1",
	"bpc_tx_zero_pause_2",
	"bpc_tx_zero_pause_3",
	"bpc_tx_zero_pause_4",
	"bpc_tx_zero_pause_5",
	"bpc_tx_zero_pause_6",
	"bpc_tx_zero_pause_7",
	"bpc_tx_first_pause_0",
	"bpc_tx_first_pause_1",
	"bpc_tx_first_pause_2",
	"bpc_tx_first_pause_3",
	"bpc_tx_first_pause_4",
	"bpc_tx_first_pause_5",
	"bpc_tx_first_pause_6",
	"bpc_tx_first_pause_7",

	/* Per-priority (0-7) pause counters, rx direction */
	"bpc_rx_pause_0",
	"bpc_rx_pause_1",
	"bpc_rx_pause_2",
	"bpc_rx_pause_3",
	"bpc_rx_pause_4",
	"bpc_rx_pause_5",
	"bpc_rx_pause_6",
	"bpc_rx_pause_7",
	"bpc_rx_zero_pause_0",
	"bpc_rx_zero_pause_1",
	"bpc_rx_zero_pause_2",
	"bpc_rx_zero_pause_3",
	"bpc_rx_zero_pause_4",
	"bpc_rx_zero_pause_5",
	"bpc_rx_zero_pause_6",
	"bpc_rx_zero_pause_7",
	"bpc_rx_first_pause_0",
	"bpc_rx_first_pause_1",
	"bpc_rx_first_pause_2",
	"bpc_rx_first_pause_3",
	"bpc_rx_first_pause_4",
	"bpc_rx_first_pause_5",
	"bpc_rx_first_pause_6",
	"bpc_rx_first_pause_7",

	/* RAD (rx admission/classification block) counters */
	"rad_rx_frames",
	"rad_rx_octets",
	"rad_rx_vlan_frames",
	"rad_rx_ucast",
	"rad_rx_ucast_octets",
	"rad_rx_ucast_vlan",
	"rad_rx_mcast",
	"rad_rx_mcast_octets",
	"rad_rx_mcast_vlan",
	"rad_rx_bcast",
	"rad_rx_bcast_octets",
	"rad_rx_bcast_vlan",
	"rad_rx_drops",

	"rlb_rad_rx_frames",
	"rlb_rad_rx_octets",
	"rlb_rad_rx_vlan_frames",
	"rlb_rad_rx_ucast",
	"rlb_rad_rx_ucast_octets",
	"rlb_rad_rx_ucast_vlan",
	"rlb_rad_rx_mcast",
	"rlb_rad_rx_mcast_octets",
	"rlb_rad_rx_mcast_vlan",
	"rlb_rad_rx_bcast",
	"rlb_rad_rx_bcast_octets",
	"rlb_rad_rx_bcast_vlan",
	"rlb_rad_rx_drops",

	/* FC block counters */
	"fc_rx_ucast_octets",
	"fc_rx_ucast",
	"fc_rx_ucast_vlan",
	"fc_rx_mcast_octets",
	"fc_rx_mcast",
	"fc_rx_mcast_vlan",
	"fc_rx_bcast_octets",
	"fc_rx_bcast",
	"fc_rx_bcast_vlan",

	"fc_tx_ucast_octets",
	"fc_tx_ucast",
	"fc_tx_ucast_vlan",
	"fc_tx_mcast_octets",
	"fc_tx_mcast",
	"fc_tx_mcast_vlan",
	"fc_tx_bcast_octets",
	"fc_tx_bcast",
	"fc_tx_bcast_vlan",
	"fc_tx_parity_errors",
	"fc_tx_timeout",
	"fc_tx_fid_parity_errors",
};
231*4882a593Smuzhiyun 
232*4882a593Smuzhiyun #define BNAD_ETHTOOL_STATS_NUM	ARRAY_SIZE(bnad_net_stats_strings)
233*4882a593Smuzhiyun 
234*4882a593Smuzhiyun static int
bnad_get_link_ksettings(struct net_device * netdev,struct ethtool_link_ksettings * cmd)235*4882a593Smuzhiyun bnad_get_link_ksettings(struct net_device *netdev,
236*4882a593Smuzhiyun 			struct ethtool_link_ksettings *cmd)
237*4882a593Smuzhiyun {
238*4882a593Smuzhiyun 	u32 supported, advertising;
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun 	supported = SUPPORTED_10000baseT_Full;
241*4882a593Smuzhiyun 	advertising = ADVERTISED_10000baseT_Full;
242*4882a593Smuzhiyun 	cmd->base.autoneg = AUTONEG_DISABLE;
243*4882a593Smuzhiyun 	supported |= SUPPORTED_FIBRE;
244*4882a593Smuzhiyun 	advertising |= ADVERTISED_FIBRE;
245*4882a593Smuzhiyun 	cmd->base.port = PORT_FIBRE;
246*4882a593Smuzhiyun 	cmd->base.phy_address = 0;
247*4882a593Smuzhiyun 
248*4882a593Smuzhiyun 	if (netif_carrier_ok(netdev)) {
249*4882a593Smuzhiyun 		cmd->base.speed = SPEED_10000;
250*4882a593Smuzhiyun 		cmd->base.duplex = DUPLEX_FULL;
251*4882a593Smuzhiyun 	} else {
252*4882a593Smuzhiyun 		cmd->base.speed = SPEED_UNKNOWN;
253*4882a593Smuzhiyun 		cmd->base.duplex = DUPLEX_UNKNOWN;
254*4882a593Smuzhiyun 	}
255*4882a593Smuzhiyun 
256*4882a593Smuzhiyun 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
257*4882a593Smuzhiyun 						supported);
258*4882a593Smuzhiyun 	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
259*4882a593Smuzhiyun 						advertising);
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	return 0;
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun static int
bnad_set_link_ksettings(struct net_device * netdev,const struct ethtool_link_ksettings * cmd)265*4882a593Smuzhiyun bnad_set_link_ksettings(struct net_device *netdev,
266*4882a593Smuzhiyun 			const struct ethtool_link_ksettings *cmd)
267*4882a593Smuzhiyun {
268*4882a593Smuzhiyun 	/* 10G full duplex setting supported only */
269*4882a593Smuzhiyun 	if (cmd->base.autoneg == AUTONEG_ENABLE)
270*4882a593Smuzhiyun 		return -EOPNOTSUPP;
271*4882a593Smuzhiyun 
272*4882a593Smuzhiyun 	if ((cmd->base.speed == SPEED_10000) &&
273*4882a593Smuzhiyun 	    (cmd->base.duplex == DUPLEX_FULL))
274*4882a593Smuzhiyun 		return 0;
275*4882a593Smuzhiyun 
276*4882a593Smuzhiyun 	return -EOPNOTSUPP;
277*4882a593Smuzhiyun }
278*4882a593Smuzhiyun 
279*4882a593Smuzhiyun static void
bnad_get_drvinfo(struct net_device * netdev,struct ethtool_drvinfo * drvinfo)280*4882a593Smuzhiyun bnad_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
281*4882a593Smuzhiyun {
282*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
283*4882a593Smuzhiyun 	struct bfa_ioc_attr *ioc_attr;
284*4882a593Smuzhiyun 	unsigned long flags;
285*4882a593Smuzhiyun 
286*4882a593Smuzhiyun 	strlcpy(drvinfo->driver, BNAD_NAME, sizeof(drvinfo->driver));
287*4882a593Smuzhiyun 
288*4882a593Smuzhiyun 	ioc_attr = kzalloc(sizeof(*ioc_attr), GFP_KERNEL);
289*4882a593Smuzhiyun 	if (ioc_attr) {
290*4882a593Smuzhiyun 		spin_lock_irqsave(&bnad->bna_lock, flags);
291*4882a593Smuzhiyun 		bfa_nw_ioc_get_attr(&bnad->bna.ioceth.ioc, ioc_attr);
292*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun 		strlcpy(drvinfo->fw_version, ioc_attr->adapter_attr.fw_ver,
295*4882a593Smuzhiyun 			sizeof(drvinfo->fw_version));
296*4882a593Smuzhiyun 		kfree(ioc_attr);
297*4882a593Smuzhiyun 	}
298*4882a593Smuzhiyun 
299*4882a593Smuzhiyun 	strlcpy(drvinfo->bus_info, pci_name(bnad->pcidev),
300*4882a593Smuzhiyun 		sizeof(drvinfo->bus_info));
301*4882a593Smuzhiyun }
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun static void
bnad_get_wol(struct net_device * netdev,struct ethtool_wolinfo * wolinfo)304*4882a593Smuzhiyun bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo)
305*4882a593Smuzhiyun {
306*4882a593Smuzhiyun 	wolinfo->supported = 0;
307*4882a593Smuzhiyun 	wolinfo->wolopts = 0;
308*4882a593Smuzhiyun }
309*4882a593Smuzhiyun 
310*4882a593Smuzhiyun static int
bnad_get_coalesce(struct net_device * netdev,struct ethtool_coalesce * coalesce)311*4882a593Smuzhiyun bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
312*4882a593Smuzhiyun {
313*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
314*4882a593Smuzhiyun 	unsigned long flags;
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	/* Lock rqd. to access bnad->bna_lock */
317*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
318*4882a593Smuzhiyun 	coalesce->use_adaptive_rx_coalesce =
319*4882a593Smuzhiyun 		(bnad->cfg_flags & BNAD_CF_DIM_ENABLED) ? true : false;
320*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
321*4882a593Smuzhiyun 
322*4882a593Smuzhiyun 	coalesce->rx_coalesce_usecs = bnad->rx_coalescing_timeo *
323*4882a593Smuzhiyun 					BFI_COALESCING_TIMER_UNIT;
324*4882a593Smuzhiyun 	coalesce->tx_coalesce_usecs = bnad->tx_coalescing_timeo *
325*4882a593Smuzhiyun 					BFI_COALESCING_TIMER_UNIT;
326*4882a593Smuzhiyun 	coalesce->tx_max_coalesced_frames = BFI_TX_INTERPKT_COUNT;
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	return 0;
329*4882a593Smuzhiyun }
330*4882a593Smuzhiyun 
331*4882a593Smuzhiyun static int
bnad_set_coalesce(struct net_device * netdev,struct ethtool_coalesce * coalesce)332*4882a593Smuzhiyun bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce)
333*4882a593Smuzhiyun {
334*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
335*4882a593Smuzhiyun 	unsigned long flags;
336*4882a593Smuzhiyun 	int to_del = 0;
337*4882a593Smuzhiyun 
338*4882a593Smuzhiyun 	if (coalesce->rx_coalesce_usecs == 0 ||
339*4882a593Smuzhiyun 	    coalesce->rx_coalesce_usecs >
340*4882a593Smuzhiyun 	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
341*4882a593Smuzhiyun 		return -EINVAL;
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 	if (coalesce->tx_coalesce_usecs == 0 ||
344*4882a593Smuzhiyun 	    coalesce->tx_coalesce_usecs >
345*4882a593Smuzhiyun 	    BFI_MAX_COALESCING_TIMEO * BFI_COALESCING_TIMER_UNIT)
346*4882a593Smuzhiyun 		return -EINVAL;
347*4882a593Smuzhiyun 
348*4882a593Smuzhiyun 	mutex_lock(&bnad->conf_mutex);
349*4882a593Smuzhiyun 	/*
350*4882a593Smuzhiyun 	 * Do not need to store rx_coalesce_usecs here
351*4882a593Smuzhiyun 	 * Every time DIM is disabled, we can get it from the
352*4882a593Smuzhiyun 	 * stack.
353*4882a593Smuzhiyun 	 */
354*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
355*4882a593Smuzhiyun 	if (coalesce->use_adaptive_rx_coalesce) {
356*4882a593Smuzhiyun 		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED)) {
357*4882a593Smuzhiyun 			bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
358*4882a593Smuzhiyun 			bnad_dim_timer_start(bnad);
359*4882a593Smuzhiyun 		}
360*4882a593Smuzhiyun 	} else {
361*4882a593Smuzhiyun 		if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED) {
362*4882a593Smuzhiyun 			bnad->cfg_flags &= ~BNAD_CF_DIM_ENABLED;
363*4882a593Smuzhiyun 			if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
364*4882a593Smuzhiyun 			    test_bit(BNAD_RF_DIM_TIMER_RUNNING,
365*4882a593Smuzhiyun 			    &bnad->run_flags)) {
366*4882a593Smuzhiyun 				clear_bit(BNAD_RF_DIM_TIMER_RUNNING,
367*4882a593Smuzhiyun 							&bnad->run_flags);
368*4882a593Smuzhiyun 				to_del = 1;
369*4882a593Smuzhiyun 			}
370*4882a593Smuzhiyun 			spin_unlock_irqrestore(&bnad->bna_lock, flags);
371*4882a593Smuzhiyun 			if (to_del)
372*4882a593Smuzhiyun 				del_timer_sync(&bnad->dim_timer);
373*4882a593Smuzhiyun 			spin_lock_irqsave(&bnad->bna_lock, flags);
374*4882a593Smuzhiyun 			bnad_rx_coalescing_timeo_set(bnad);
375*4882a593Smuzhiyun 		}
376*4882a593Smuzhiyun 	}
377*4882a593Smuzhiyun 	if (bnad->tx_coalescing_timeo != coalesce->tx_coalesce_usecs /
378*4882a593Smuzhiyun 					BFI_COALESCING_TIMER_UNIT) {
379*4882a593Smuzhiyun 		bnad->tx_coalescing_timeo = coalesce->tx_coalesce_usecs /
380*4882a593Smuzhiyun 						BFI_COALESCING_TIMER_UNIT;
381*4882a593Smuzhiyun 		bnad_tx_coalescing_timeo_set(bnad);
382*4882a593Smuzhiyun 	}
383*4882a593Smuzhiyun 
384*4882a593Smuzhiyun 	if (bnad->rx_coalescing_timeo != coalesce->rx_coalesce_usecs /
385*4882a593Smuzhiyun 					BFI_COALESCING_TIMER_UNIT) {
386*4882a593Smuzhiyun 		bnad->rx_coalescing_timeo = coalesce->rx_coalesce_usecs /
387*4882a593Smuzhiyun 						BFI_COALESCING_TIMER_UNIT;
388*4882a593Smuzhiyun 
389*4882a593Smuzhiyun 		if (!(bnad->cfg_flags & BNAD_CF_DIM_ENABLED))
390*4882a593Smuzhiyun 			bnad_rx_coalescing_timeo_set(bnad);
391*4882a593Smuzhiyun 
392*4882a593Smuzhiyun 	}
393*4882a593Smuzhiyun 
394*4882a593Smuzhiyun 	/* Add Tx Inter-pkt DMA count?  */
395*4882a593Smuzhiyun 
396*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
399*4882a593Smuzhiyun 	return 0;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun static void
bnad_get_ringparam(struct net_device * netdev,struct ethtool_ringparam * ringparam)403*4882a593Smuzhiyun bnad_get_ringparam(struct net_device *netdev,
404*4882a593Smuzhiyun 		   struct ethtool_ringparam *ringparam)
405*4882a593Smuzhiyun {
406*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
407*4882a593Smuzhiyun 
408*4882a593Smuzhiyun 	ringparam->rx_max_pending = BNAD_MAX_RXQ_DEPTH;
409*4882a593Smuzhiyun 	ringparam->tx_max_pending = BNAD_MAX_TXQ_DEPTH;
410*4882a593Smuzhiyun 
411*4882a593Smuzhiyun 	ringparam->rx_pending = bnad->rxq_depth;
412*4882a593Smuzhiyun 	ringparam->tx_pending = bnad->txq_depth;
413*4882a593Smuzhiyun }
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun static int
bnad_set_ringparam(struct net_device * netdev,struct ethtool_ringparam * ringparam)416*4882a593Smuzhiyun bnad_set_ringparam(struct net_device *netdev,
417*4882a593Smuzhiyun 		   struct ethtool_ringparam *ringparam)
418*4882a593Smuzhiyun {
419*4882a593Smuzhiyun 	int i, current_err, err = 0;
420*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
421*4882a593Smuzhiyun 	unsigned long flags;
422*4882a593Smuzhiyun 
423*4882a593Smuzhiyun 	mutex_lock(&bnad->conf_mutex);
424*4882a593Smuzhiyun 	if (ringparam->rx_pending == bnad->rxq_depth &&
425*4882a593Smuzhiyun 	    ringparam->tx_pending == bnad->txq_depth) {
426*4882a593Smuzhiyun 		mutex_unlock(&bnad->conf_mutex);
427*4882a593Smuzhiyun 		return 0;
428*4882a593Smuzhiyun 	}
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun 	if (ringparam->rx_pending < BNAD_MIN_Q_DEPTH ||
431*4882a593Smuzhiyun 	    ringparam->rx_pending > BNAD_MAX_RXQ_DEPTH ||
432*4882a593Smuzhiyun 	    !is_power_of_2(ringparam->rx_pending)) {
433*4882a593Smuzhiyun 		mutex_unlock(&bnad->conf_mutex);
434*4882a593Smuzhiyun 		return -EINVAL;
435*4882a593Smuzhiyun 	}
436*4882a593Smuzhiyun 	if (ringparam->tx_pending < BNAD_MIN_Q_DEPTH ||
437*4882a593Smuzhiyun 	    ringparam->tx_pending > BNAD_MAX_TXQ_DEPTH ||
438*4882a593Smuzhiyun 	    !is_power_of_2(ringparam->tx_pending)) {
439*4882a593Smuzhiyun 		mutex_unlock(&bnad->conf_mutex);
440*4882a593Smuzhiyun 		return -EINVAL;
441*4882a593Smuzhiyun 	}
442*4882a593Smuzhiyun 
443*4882a593Smuzhiyun 	if (ringparam->rx_pending != bnad->rxq_depth) {
444*4882a593Smuzhiyun 		bnad->rxq_depth = ringparam->rx_pending;
445*4882a593Smuzhiyun 		if (!netif_running(netdev)) {
446*4882a593Smuzhiyun 			mutex_unlock(&bnad->conf_mutex);
447*4882a593Smuzhiyun 			return 0;
448*4882a593Smuzhiyun 		}
449*4882a593Smuzhiyun 
450*4882a593Smuzhiyun 		for (i = 0; i < bnad->num_rx; i++) {
451*4882a593Smuzhiyun 			if (!bnad->rx_info[i].rx)
452*4882a593Smuzhiyun 				continue;
453*4882a593Smuzhiyun 			bnad_destroy_rx(bnad, i);
454*4882a593Smuzhiyun 			current_err = bnad_setup_rx(bnad, i);
455*4882a593Smuzhiyun 			if (current_err && !err)
456*4882a593Smuzhiyun 				err = current_err;
457*4882a593Smuzhiyun 		}
458*4882a593Smuzhiyun 
459*4882a593Smuzhiyun 		if (!err && bnad->rx_info[0].rx) {
460*4882a593Smuzhiyun 			/* restore rx configuration */
461*4882a593Smuzhiyun 			bnad_restore_vlans(bnad, 0);
462*4882a593Smuzhiyun 			bnad_enable_default_bcast(bnad);
463*4882a593Smuzhiyun 			spin_lock_irqsave(&bnad->bna_lock, flags);
464*4882a593Smuzhiyun 			bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
465*4882a593Smuzhiyun 			spin_unlock_irqrestore(&bnad->bna_lock, flags);
466*4882a593Smuzhiyun 			bnad->cfg_flags &= ~(BNAD_CF_ALLMULTI |
467*4882a593Smuzhiyun 					     BNAD_CF_PROMISC);
468*4882a593Smuzhiyun 			bnad_set_rx_mode(netdev);
469*4882a593Smuzhiyun 		}
470*4882a593Smuzhiyun 	}
471*4882a593Smuzhiyun 	if (ringparam->tx_pending != bnad->txq_depth) {
472*4882a593Smuzhiyun 		bnad->txq_depth = ringparam->tx_pending;
473*4882a593Smuzhiyun 		if (!netif_running(netdev)) {
474*4882a593Smuzhiyun 			mutex_unlock(&bnad->conf_mutex);
475*4882a593Smuzhiyun 			return 0;
476*4882a593Smuzhiyun 		}
477*4882a593Smuzhiyun 
478*4882a593Smuzhiyun 		for (i = 0; i < bnad->num_tx; i++) {
479*4882a593Smuzhiyun 			if (!bnad->tx_info[i].tx)
480*4882a593Smuzhiyun 				continue;
481*4882a593Smuzhiyun 			bnad_destroy_tx(bnad, i);
482*4882a593Smuzhiyun 			current_err = bnad_setup_tx(bnad, i);
483*4882a593Smuzhiyun 			if (current_err && !err)
484*4882a593Smuzhiyun 				err = current_err;
485*4882a593Smuzhiyun 		}
486*4882a593Smuzhiyun 	}
487*4882a593Smuzhiyun 
488*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
489*4882a593Smuzhiyun 	return err;
490*4882a593Smuzhiyun }
491*4882a593Smuzhiyun 
492*4882a593Smuzhiyun static void
bnad_get_pauseparam(struct net_device * netdev,struct ethtool_pauseparam * pauseparam)493*4882a593Smuzhiyun bnad_get_pauseparam(struct net_device *netdev,
494*4882a593Smuzhiyun 		    struct ethtool_pauseparam *pauseparam)
495*4882a593Smuzhiyun {
496*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
497*4882a593Smuzhiyun 
498*4882a593Smuzhiyun 	pauseparam->autoneg = 0;
499*4882a593Smuzhiyun 	pauseparam->rx_pause = bnad->bna.enet.pause_config.rx_pause;
500*4882a593Smuzhiyun 	pauseparam->tx_pause = bnad->bna.enet.pause_config.tx_pause;
501*4882a593Smuzhiyun }
502*4882a593Smuzhiyun 
503*4882a593Smuzhiyun static int
bnad_set_pauseparam(struct net_device * netdev,struct ethtool_pauseparam * pauseparam)504*4882a593Smuzhiyun bnad_set_pauseparam(struct net_device *netdev,
505*4882a593Smuzhiyun 		    struct ethtool_pauseparam *pauseparam)
506*4882a593Smuzhiyun {
507*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
508*4882a593Smuzhiyun 	struct bna_pause_config pause_config;
509*4882a593Smuzhiyun 	unsigned long flags;
510*4882a593Smuzhiyun 
511*4882a593Smuzhiyun 	if (pauseparam->autoneg == AUTONEG_ENABLE)
512*4882a593Smuzhiyun 		return -EINVAL;
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun 	mutex_lock(&bnad->conf_mutex);
515*4882a593Smuzhiyun 	if (pauseparam->rx_pause != bnad->bna.enet.pause_config.rx_pause ||
516*4882a593Smuzhiyun 	    pauseparam->tx_pause != bnad->bna.enet.pause_config.tx_pause) {
517*4882a593Smuzhiyun 		pause_config.rx_pause = pauseparam->rx_pause;
518*4882a593Smuzhiyun 		pause_config.tx_pause = pauseparam->tx_pause;
519*4882a593Smuzhiyun 		spin_lock_irqsave(&bnad->bna_lock, flags);
520*4882a593Smuzhiyun 		bna_enet_pause_config(&bnad->bna.enet, &pause_config);
521*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
522*4882a593Smuzhiyun 	}
523*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
524*4882a593Smuzhiyun 	return 0;
525*4882a593Smuzhiyun }
526*4882a593Smuzhiyun 
527*4882a593Smuzhiyun static void
bnad_get_strings(struct net_device * netdev,u32 stringset,u8 * string)528*4882a593Smuzhiyun bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
529*4882a593Smuzhiyun {
530*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
531*4882a593Smuzhiyun 	int i, j, q_num;
532*4882a593Smuzhiyun 	u32 bmap;
533*4882a593Smuzhiyun 
534*4882a593Smuzhiyun 	mutex_lock(&bnad->conf_mutex);
535*4882a593Smuzhiyun 
536*4882a593Smuzhiyun 	switch (stringset) {
537*4882a593Smuzhiyun 	case ETH_SS_STATS:
538*4882a593Smuzhiyun 		for (i = 0; i < BNAD_ETHTOOL_STATS_NUM; i++) {
539*4882a593Smuzhiyun 			BUG_ON(!(strlen(bnad_net_stats_strings[i]) <
540*4882a593Smuzhiyun 				   ETH_GSTRING_LEN));
541*4882a593Smuzhiyun 			strncpy(string, bnad_net_stats_strings[i],
542*4882a593Smuzhiyun 				ETH_GSTRING_LEN);
543*4882a593Smuzhiyun 			string += ETH_GSTRING_LEN;
544*4882a593Smuzhiyun 		}
545*4882a593Smuzhiyun 		bmap = bna_tx_rid_mask(&bnad->bna);
546*4882a593Smuzhiyun 		for (i = 0; bmap; i++) {
547*4882a593Smuzhiyun 			if (bmap & 1) {
548*4882a593Smuzhiyun 				sprintf(string, "txf%d_ucast_octets", i);
549*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
550*4882a593Smuzhiyun 				sprintf(string, "txf%d_ucast", i);
551*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
552*4882a593Smuzhiyun 				sprintf(string, "txf%d_ucast_vlan", i);
553*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
554*4882a593Smuzhiyun 				sprintf(string, "txf%d_mcast_octets", i);
555*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
556*4882a593Smuzhiyun 				sprintf(string, "txf%d_mcast", i);
557*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
558*4882a593Smuzhiyun 				sprintf(string, "txf%d_mcast_vlan", i);
559*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
560*4882a593Smuzhiyun 				sprintf(string, "txf%d_bcast_octets", i);
561*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
562*4882a593Smuzhiyun 				sprintf(string, "txf%d_bcast", i);
563*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
564*4882a593Smuzhiyun 				sprintf(string, "txf%d_bcast_vlan", i);
565*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
566*4882a593Smuzhiyun 				sprintf(string, "txf%d_errors", i);
567*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
568*4882a593Smuzhiyun 				sprintf(string, "txf%d_filter_vlan", i);
569*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
570*4882a593Smuzhiyun 				sprintf(string, "txf%d_filter_mac_sa", i);
571*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
572*4882a593Smuzhiyun 			}
573*4882a593Smuzhiyun 			bmap >>= 1;
574*4882a593Smuzhiyun 		}
575*4882a593Smuzhiyun 
576*4882a593Smuzhiyun 		bmap = bna_rx_rid_mask(&bnad->bna);
577*4882a593Smuzhiyun 		for (i = 0; bmap; i++) {
578*4882a593Smuzhiyun 			if (bmap & 1) {
579*4882a593Smuzhiyun 				sprintf(string, "rxf%d_ucast_octets", i);
580*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
581*4882a593Smuzhiyun 				sprintf(string, "rxf%d_ucast", i);
582*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
583*4882a593Smuzhiyun 				sprintf(string, "rxf%d_ucast_vlan", i);
584*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
585*4882a593Smuzhiyun 				sprintf(string, "rxf%d_mcast_octets", i);
586*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
587*4882a593Smuzhiyun 				sprintf(string, "rxf%d_mcast", i);
588*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
589*4882a593Smuzhiyun 				sprintf(string, "rxf%d_mcast_vlan", i);
590*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
591*4882a593Smuzhiyun 				sprintf(string, "rxf%d_bcast_octets", i);
592*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
593*4882a593Smuzhiyun 				sprintf(string, "rxf%d_bcast", i);
594*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
595*4882a593Smuzhiyun 				sprintf(string, "rxf%d_bcast_vlan", i);
596*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
597*4882a593Smuzhiyun 				sprintf(string, "rxf%d_frame_drops", i);
598*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
599*4882a593Smuzhiyun 			}
600*4882a593Smuzhiyun 			bmap >>= 1;
601*4882a593Smuzhiyun 		}
602*4882a593Smuzhiyun 
603*4882a593Smuzhiyun 		q_num = 0;
604*4882a593Smuzhiyun 		for (i = 0; i < bnad->num_rx; i++) {
605*4882a593Smuzhiyun 			if (!bnad->rx_info[i].rx)
606*4882a593Smuzhiyun 				continue;
607*4882a593Smuzhiyun 			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
608*4882a593Smuzhiyun 				sprintf(string, "cq%d_producer_index", q_num);
609*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
610*4882a593Smuzhiyun 				sprintf(string, "cq%d_consumer_index", q_num);
611*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
612*4882a593Smuzhiyun 				sprintf(string, "cq%d_hw_producer_index",
613*4882a593Smuzhiyun 					q_num);
614*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
615*4882a593Smuzhiyun 				sprintf(string, "cq%d_intr", q_num);
616*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
617*4882a593Smuzhiyun 				sprintf(string, "cq%d_poll", q_num);
618*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
619*4882a593Smuzhiyun 				sprintf(string, "cq%d_schedule", q_num);
620*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
621*4882a593Smuzhiyun 				sprintf(string, "cq%d_keep_poll", q_num);
622*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
623*4882a593Smuzhiyun 				sprintf(string, "cq%d_complete", q_num);
624*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
625*4882a593Smuzhiyun 				q_num++;
626*4882a593Smuzhiyun 			}
627*4882a593Smuzhiyun 		}
628*4882a593Smuzhiyun 
629*4882a593Smuzhiyun 		q_num = 0;
630*4882a593Smuzhiyun 		for (i = 0; i < bnad->num_rx; i++) {
631*4882a593Smuzhiyun 			if (!bnad->rx_info[i].rx)
632*4882a593Smuzhiyun 				continue;
633*4882a593Smuzhiyun 			for (j = 0; j < bnad->num_rxp_per_rx; j++) {
634*4882a593Smuzhiyun 				sprintf(string, "rxq%d_packets", q_num);
635*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
636*4882a593Smuzhiyun 				sprintf(string, "rxq%d_bytes", q_num);
637*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
638*4882a593Smuzhiyun 				sprintf(string, "rxq%d_packets_with_error",
639*4882a593Smuzhiyun 								q_num);
640*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
641*4882a593Smuzhiyun 				sprintf(string, "rxq%d_allocbuf_failed", q_num);
642*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
643*4882a593Smuzhiyun 				sprintf(string, "rxq%d_mapbuf_failed", q_num);
644*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
645*4882a593Smuzhiyun 				sprintf(string, "rxq%d_producer_index", q_num);
646*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
647*4882a593Smuzhiyun 				sprintf(string, "rxq%d_consumer_index", q_num);
648*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
649*4882a593Smuzhiyun 				q_num++;
650*4882a593Smuzhiyun 				if (bnad->rx_info[i].rx_ctrl[j].ccb &&
651*4882a593Smuzhiyun 					bnad->rx_info[i].rx_ctrl[j].ccb->
652*4882a593Smuzhiyun 					rcb[1] &&
653*4882a593Smuzhiyun 					bnad->rx_info[i].rx_ctrl[j].ccb->
654*4882a593Smuzhiyun 					rcb[1]->rxq) {
655*4882a593Smuzhiyun 					sprintf(string, "rxq%d_packets", q_num);
656*4882a593Smuzhiyun 					string += ETH_GSTRING_LEN;
657*4882a593Smuzhiyun 					sprintf(string, "rxq%d_bytes", q_num);
658*4882a593Smuzhiyun 					string += ETH_GSTRING_LEN;
659*4882a593Smuzhiyun 					sprintf(string,
660*4882a593Smuzhiyun 					"rxq%d_packets_with_error", q_num);
661*4882a593Smuzhiyun 					string += ETH_GSTRING_LEN;
662*4882a593Smuzhiyun 					sprintf(string, "rxq%d_allocbuf_failed",
663*4882a593Smuzhiyun 								q_num);
664*4882a593Smuzhiyun 					string += ETH_GSTRING_LEN;
665*4882a593Smuzhiyun 					sprintf(string, "rxq%d_mapbuf_failed",
666*4882a593Smuzhiyun 						q_num);
667*4882a593Smuzhiyun 					string += ETH_GSTRING_LEN;
668*4882a593Smuzhiyun 					sprintf(string, "rxq%d_producer_index",
669*4882a593Smuzhiyun 								q_num);
670*4882a593Smuzhiyun 					string += ETH_GSTRING_LEN;
671*4882a593Smuzhiyun 					sprintf(string, "rxq%d_consumer_index",
672*4882a593Smuzhiyun 								q_num);
673*4882a593Smuzhiyun 					string += ETH_GSTRING_LEN;
674*4882a593Smuzhiyun 					q_num++;
675*4882a593Smuzhiyun 				}
676*4882a593Smuzhiyun 			}
677*4882a593Smuzhiyun 		}
678*4882a593Smuzhiyun 
679*4882a593Smuzhiyun 		q_num = 0;
680*4882a593Smuzhiyun 		for (i = 0; i < bnad->num_tx; i++) {
681*4882a593Smuzhiyun 			if (!bnad->tx_info[i].tx)
682*4882a593Smuzhiyun 				continue;
683*4882a593Smuzhiyun 			for (j = 0; j < bnad->num_txq_per_tx; j++) {
684*4882a593Smuzhiyun 				sprintf(string, "txq%d_packets", q_num);
685*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
686*4882a593Smuzhiyun 				sprintf(string, "txq%d_bytes", q_num);
687*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
688*4882a593Smuzhiyun 				sprintf(string, "txq%d_producer_index", q_num);
689*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
690*4882a593Smuzhiyun 				sprintf(string, "txq%d_consumer_index", q_num);
691*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
692*4882a593Smuzhiyun 				sprintf(string, "txq%d_hw_consumer_index",
693*4882a593Smuzhiyun 									q_num);
694*4882a593Smuzhiyun 				string += ETH_GSTRING_LEN;
695*4882a593Smuzhiyun 				q_num++;
696*4882a593Smuzhiyun 			}
697*4882a593Smuzhiyun 		}
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun 		break;
700*4882a593Smuzhiyun 
701*4882a593Smuzhiyun 	default:
702*4882a593Smuzhiyun 		break;
703*4882a593Smuzhiyun 	}
704*4882a593Smuzhiyun 
705*4882a593Smuzhiyun 	mutex_unlock(&bnad->conf_mutex);
706*4882a593Smuzhiyun }
707*4882a593Smuzhiyun 
708*4882a593Smuzhiyun static int
bnad_get_stats_count_locked(struct net_device * netdev)709*4882a593Smuzhiyun bnad_get_stats_count_locked(struct net_device *netdev)
710*4882a593Smuzhiyun {
711*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
712*4882a593Smuzhiyun 	int i, j, count = 0, rxf_active_num = 0, txf_active_num = 0;
713*4882a593Smuzhiyun 	u32 bmap;
714*4882a593Smuzhiyun 
715*4882a593Smuzhiyun 	bmap = bna_tx_rid_mask(&bnad->bna);
716*4882a593Smuzhiyun 	for (i = 0; bmap; i++) {
717*4882a593Smuzhiyun 		if (bmap & 1)
718*4882a593Smuzhiyun 			txf_active_num++;
719*4882a593Smuzhiyun 		bmap >>= 1;
720*4882a593Smuzhiyun 	}
721*4882a593Smuzhiyun 	bmap = bna_rx_rid_mask(&bnad->bna);
722*4882a593Smuzhiyun 	for (i = 0; bmap; i++) {
723*4882a593Smuzhiyun 		if (bmap & 1)
724*4882a593Smuzhiyun 			rxf_active_num++;
725*4882a593Smuzhiyun 		bmap >>= 1;
726*4882a593Smuzhiyun 	}
727*4882a593Smuzhiyun 	count = BNAD_ETHTOOL_STATS_NUM +
728*4882a593Smuzhiyun 		txf_active_num * BNAD_NUM_TXF_COUNTERS +
729*4882a593Smuzhiyun 		rxf_active_num * BNAD_NUM_RXF_COUNTERS;
730*4882a593Smuzhiyun 
731*4882a593Smuzhiyun 	for (i = 0; i < bnad->num_rx; i++) {
732*4882a593Smuzhiyun 		if (!bnad->rx_info[i].rx)
733*4882a593Smuzhiyun 			continue;
734*4882a593Smuzhiyun 		count += bnad->num_rxp_per_rx * BNAD_NUM_CQ_COUNTERS;
735*4882a593Smuzhiyun 		count += bnad->num_rxp_per_rx * BNAD_NUM_RXQ_COUNTERS;
736*4882a593Smuzhiyun 		for (j = 0; j < bnad->num_rxp_per_rx; j++)
737*4882a593Smuzhiyun 			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
738*4882a593Smuzhiyun 				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
739*4882a593Smuzhiyun 				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1]->rxq)
740*4882a593Smuzhiyun 				count +=  BNAD_NUM_RXQ_COUNTERS;
741*4882a593Smuzhiyun 	}
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun 	for (i = 0; i < bnad->num_tx; i++) {
744*4882a593Smuzhiyun 		if (!bnad->tx_info[i].tx)
745*4882a593Smuzhiyun 			continue;
746*4882a593Smuzhiyun 		count += bnad->num_txq_per_tx * BNAD_NUM_TXQ_COUNTERS;
747*4882a593Smuzhiyun 	}
748*4882a593Smuzhiyun 	return count;
749*4882a593Smuzhiyun }
750*4882a593Smuzhiyun 
/* Append the per-queue counters to @buf starting at index @bi and
 * return the next free index.  The values follow the same layout as
 * the per-queue strings emitted by bnad_get_strings(): CQ counters
 * first, then RxQ counters (rcb[0] and, when present, rcb[1]), then
 * TxQ counters.  Runs under bnad->bna_lock, taken by the caller
 * bnad_get_ethtool_stats(), so the rings are stable while read.
 */
static int
bnad_per_q_stats_fill(struct bnad *bnad, u64 *buf, int bi)
{
	int i, j;
	struct bna_rcb *rcb = NULL;
	struct bna_tcb *tcb = NULL;

	/* CQ counters: one group per Rx path whose rcb[0] ring is live. */
	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
				bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0]->rxq) {
				buf[bi++] = bnad->rx_info[i].rx_ctrl[j].
						ccb->producer_index;
				/* No CQ consumer index is kept; report 0. */
				buf[bi++] = 0; /* ccb->consumer_index */
				buf[bi++] = *(bnad->rx_info[i].rx_ctrl[j].
						ccb->hw_producer_index);

				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_intr_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_poll_ctr;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_schedule;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_keep_poll;
				buf[bi++] = bnad->rx_info[i].
						rx_ctrl[j].rx_complete;
			}
	}
	/* RxQ counters: rcb[0] for each Rx path, then rcb[1] if active. */
	for (i = 0; i < bnad->num_rx; i++) {
		if (!bnad->rx_info[i].rx)
			continue;
		for (j = 0; j < bnad->num_rxp_per_rx; j++)
			if (bnad->rx_info[i].rx_ctrl[j].ccb) {
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[0] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[0]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
							ccb->rcb[0];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->rxq->rxbuf_map_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
				if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
					bnad->rx_info[i].rx_ctrl[j].ccb->
					rcb[1]->rxq) {
					rcb = bnad->rx_info[i].rx_ctrl[j].
								ccb->rcb[1];
					buf[bi++] = rcb->rxq->rx_packets;
					buf[bi++] = rcb->rxq->rx_bytes;
					buf[bi++] = rcb->rxq->
							rx_packets_with_error;
					buf[bi++] = rcb->rxq->
							rxbuf_alloc_failed;
					buf[bi++] = rcb->rxq->rxbuf_map_failed;
					buf[bi++] = rcb->producer_index;
					buf[bi++] = rcb->consumer_index;
				}
			}
	}

	/* TxQ counters for every active transmit queue. */
	for (i = 0; i < bnad->num_tx; i++) {
		if (!bnad->tx_info[i].tx)
			continue;
		for (j = 0; j < bnad->num_txq_per_tx; j++)
			if (bnad->tx_info[i].tcb[j] &&
				bnad->tx_info[i].tcb[j]->txq) {
				tcb = bnad->tx_info[i].tcb[j];
				buf[bi++] = tcb->txq->tx_packets;
				buf[bi++] = tcb->txq->tx_bytes;
				buf[bi++] = tcb->producer_index;
				buf[bi++] = tcb->consumer_index;
				buf[bi++] = *(tcb->hw_consumer_index);
			}
	}

	return bi;
}
838*4882a593Smuzhiyun 
/* ethtool ->get_ethtool_stats() handler.
 *
 * Fills @buf with the counters advertised by bnad_get_strings(), in the
 * same order: netdev stats, driver stats, the non-TxF/RxF hardware
 * stats, per-TxF and per-RxF hardware stats, and finally the per-queue
 * counters.  If the current counter count no longer matches
 * stats->n_stats (the rings were reconfigured since the strings were
 * queried) the buffer is left untouched to avoid a layout mismatch.
 */
static void
bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
		       u64 *buf)
{
	struct bnad *bnad = netdev_priv(netdev);
	int i, j, bi = 0;
	unsigned long flags;
	struct rtnl_link_stats64 net_stats64;
	u64 *stats64;
	u32 bmap;

	/* conf_mutex keeps the ring configuration stable for the check. */
	mutex_lock(&bnad->conf_mutex);
	if (bnad_get_stats_count_locked(netdev) != stats->n_stats) {
		mutex_unlock(&bnad->conf_mutex);
		return;
	}

	/*
	 * Used bna_lock to sync reads from bna_stats, which is written
	 * under the same lock
	 */
	spin_lock_irqsave(&bnad->bna_lock, flags);

	memset(&net_stats64, 0, sizeof(net_stats64));
	bnad_netdev_qstats_fill(bnad, &net_stats64);
	bnad_netdev_hwstats_fill(bnad, &net_stats64);

	/* Netdev-level counters, matching bnad_net_stats_strings[]. */
	buf[bi++] = net_stats64.rx_packets;
	buf[bi++] = net_stats64.tx_packets;
	buf[bi++] = net_stats64.rx_bytes;
	buf[bi++] = net_stats64.tx_bytes;
	buf[bi++] = net_stats64.rx_errors;
	buf[bi++] = net_stats64.tx_errors;
	buf[bi++] = net_stats64.rx_dropped;
	buf[bi++] = net_stats64.tx_dropped;
	buf[bi++] = net_stats64.multicast;
	buf[bi++] = net_stats64.collisions;
	buf[bi++] = net_stats64.rx_length_errors;
	buf[bi++] = net_stats64.rx_crc_errors;
	buf[bi++] = net_stats64.rx_frame_errors;
	buf[bi++] = net_stats64.tx_fifo_errors;

	/* Get netif_queue_stopped from stack */
	bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);

	/* Fill driver stats into ethtool buffers */
	stats64 = (u64 *)&bnad->stats.drv_stats;
	for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
		buf[bi++] = stats64[i];

	/* Fill hardware stats excluding the rxf/txf into ethtool bufs */
	stats64 = (u64 *) &bnad->stats.bna_stats->hw_stats;
	for (i = 0;
	     i < offsetof(struct bfi_enet_stats, rxf_stats[0]) /
		sizeof(u64);
	     i++)
		buf[bi++] = stats64[i];

	/* Fill txf stats into ethtool buffers */
	bmap = bna_tx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.txf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_txf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/*  Fill rxf stats into ethtool buffers */
	bmap = bna_rx_rid_mask(&bnad->bna);
	for (i = 0; bmap; i++) {
		if (bmap & 1) {
			stats64 = (u64 *)&bnad->stats.bna_stats->
						hw_stats.rxf_stats[i];
			for (j = 0; j < sizeof(struct bfi_enet_stats_rxf) /
					sizeof(u64); j++)
				buf[bi++] = stats64[j];
		}
		bmap >>= 1;
	}

	/* Fill per Q stats into ethtool buffers */
	bi = bnad_per_q_stats_fill(bnad, buf, bi);

	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	mutex_unlock(&bnad->conf_mutex);
}
930*4882a593Smuzhiyun 
931*4882a593Smuzhiyun static int
bnad_get_sset_count(struct net_device * netdev,int sset)932*4882a593Smuzhiyun bnad_get_sset_count(struct net_device *netdev, int sset)
933*4882a593Smuzhiyun {
934*4882a593Smuzhiyun 	switch (sset) {
935*4882a593Smuzhiyun 	case ETH_SS_STATS:
936*4882a593Smuzhiyun 		return bnad_get_stats_count_locked(netdev);
937*4882a593Smuzhiyun 	default:
938*4882a593Smuzhiyun 		return -EOPNOTSUPP;
939*4882a593Smuzhiyun 	}
940*4882a593Smuzhiyun }
941*4882a593Smuzhiyun 
942*4882a593Smuzhiyun static u32
bnad_get_flash_partition_by_offset(struct bnad * bnad,u32 offset,u32 * base_offset)943*4882a593Smuzhiyun bnad_get_flash_partition_by_offset(struct bnad *bnad, u32 offset,
944*4882a593Smuzhiyun 				u32 *base_offset)
945*4882a593Smuzhiyun {
946*4882a593Smuzhiyun 	struct bfa_flash_attr *flash_attr;
947*4882a593Smuzhiyun 	struct bnad_iocmd_comp fcomp;
948*4882a593Smuzhiyun 	u32 i, flash_part = 0, ret;
949*4882a593Smuzhiyun 	unsigned long flags = 0;
950*4882a593Smuzhiyun 
951*4882a593Smuzhiyun 	flash_attr = kzalloc(sizeof(struct bfa_flash_attr), GFP_KERNEL);
952*4882a593Smuzhiyun 	if (!flash_attr)
953*4882a593Smuzhiyun 		return 0;
954*4882a593Smuzhiyun 
955*4882a593Smuzhiyun 	fcomp.bnad = bnad;
956*4882a593Smuzhiyun 	fcomp.comp_status = 0;
957*4882a593Smuzhiyun 
958*4882a593Smuzhiyun 	init_completion(&fcomp.comp);
959*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
960*4882a593Smuzhiyun 	ret = bfa_nw_flash_get_attr(&bnad->bna.flash, flash_attr,
961*4882a593Smuzhiyun 				bnad_cb_completion, &fcomp);
962*4882a593Smuzhiyun 	if (ret != BFA_STATUS_OK) {
963*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
964*4882a593Smuzhiyun 		kfree(flash_attr);
965*4882a593Smuzhiyun 		return 0;
966*4882a593Smuzhiyun 	}
967*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
968*4882a593Smuzhiyun 	wait_for_completion(&fcomp.comp);
969*4882a593Smuzhiyun 	ret = fcomp.comp_status;
970*4882a593Smuzhiyun 
971*4882a593Smuzhiyun 	/* Check for the flash type & base offset value */
972*4882a593Smuzhiyun 	if (ret == BFA_STATUS_OK) {
973*4882a593Smuzhiyun 		for (i = 0; i < flash_attr->npart; i++) {
974*4882a593Smuzhiyun 			if (offset >= flash_attr->part[i].part_off &&
975*4882a593Smuzhiyun 			    offset < (flash_attr->part[i].part_off +
976*4882a593Smuzhiyun 				      flash_attr->part[i].part_size)) {
977*4882a593Smuzhiyun 				flash_part = flash_attr->part[i].part_type;
978*4882a593Smuzhiyun 				*base_offset = flash_attr->part[i].part_off;
979*4882a593Smuzhiyun 				break;
980*4882a593Smuzhiyun 			}
981*4882a593Smuzhiyun 		}
982*4882a593Smuzhiyun 	}
983*4882a593Smuzhiyun 	kfree(flash_attr);
984*4882a593Smuzhiyun 	return flash_part;
985*4882a593Smuzhiyun }
986*4882a593Smuzhiyun 
/* ethtool ->get_eeprom_len() handler.
 *
 * The get/set_eeprom handlers address the adapter flash directly, so
 * the reported "EEPROM" size is the total flash size.
 */
static int
bnad_get_eeprom_len(struct net_device *netdev)
{
	return BFA_TOTAL_FLASH_SIZE;
}
992*4882a593Smuzhiyun 
993*4882a593Smuzhiyun static int
bnad_get_eeprom(struct net_device * netdev,struct ethtool_eeprom * eeprom,u8 * bytes)994*4882a593Smuzhiyun bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
995*4882a593Smuzhiyun 		u8 *bytes)
996*4882a593Smuzhiyun {
997*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
998*4882a593Smuzhiyun 	struct bnad_iocmd_comp fcomp;
999*4882a593Smuzhiyun 	u32 flash_part = 0, base_offset = 0;
1000*4882a593Smuzhiyun 	unsigned long flags = 0;
1001*4882a593Smuzhiyun 	int ret = 0;
1002*4882a593Smuzhiyun 
1003*4882a593Smuzhiyun 	/* Fill the magic value */
1004*4882a593Smuzhiyun 	eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16);
1005*4882a593Smuzhiyun 
1006*4882a593Smuzhiyun 	/* Query the flash partition based on the offset */
1007*4882a593Smuzhiyun 	flash_part = bnad_get_flash_partition_by_offset(bnad,
1008*4882a593Smuzhiyun 				eeprom->offset, &base_offset);
1009*4882a593Smuzhiyun 	if (flash_part == 0)
1010*4882a593Smuzhiyun 		return -EFAULT;
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun 	fcomp.bnad = bnad;
1013*4882a593Smuzhiyun 	fcomp.comp_status = 0;
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun 	init_completion(&fcomp.comp);
1016*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1017*4882a593Smuzhiyun 	ret = bfa_nw_flash_read_part(&bnad->bna.flash, flash_part,
1018*4882a593Smuzhiyun 				bnad->id, bytes, eeprom->len,
1019*4882a593Smuzhiyun 				eeprom->offset - base_offset,
1020*4882a593Smuzhiyun 				bnad_cb_completion, &fcomp);
1021*4882a593Smuzhiyun 	if (ret != BFA_STATUS_OK) {
1022*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
1023*4882a593Smuzhiyun 		goto done;
1024*4882a593Smuzhiyun 	}
1025*4882a593Smuzhiyun 
1026*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1027*4882a593Smuzhiyun 	wait_for_completion(&fcomp.comp);
1028*4882a593Smuzhiyun 	ret = fcomp.comp_status;
1029*4882a593Smuzhiyun done:
1030*4882a593Smuzhiyun 	return ret;
1031*4882a593Smuzhiyun }
1032*4882a593Smuzhiyun 
1033*4882a593Smuzhiyun static int
bnad_set_eeprom(struct net_device * netdev,struct ethtool_eeprom * eeprom,u8 * bytes)1034*4882a593Smuzhiyun bnad_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
1035*4882a593Smuzhiyun 		u8 *bytes)
1036*4882a593Smuzhiyun {
1037*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
1038*4882a593Smuzhiyun 	struct bnad_iocmd_comp fcomp;
1039*4882a593Smuzhiyun 	u32 flash_part = 0, base_offset = 0;
1040*4882a593Smuzhiyun 	unsigned long flags = 0;
1041*4882a593Smuzhiyun 	int ret = 0;
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun 	/* Check if the flash update request is valid */
1044*4882a593Smuzhiyun 	if (eeprom->magic != (bnad->pcidev->vendor |
1045*4882a593Smuzhiyun 			     (bnad->pcidev->device << 16)))
1046*4882a593Smuzhiyun 		return -EINVAL;
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun 	/* Query the flash partition based on the offset */
1049*4882a593Smuzhiyun 	flash_part = bnad_get_flash_partition_by_offset(bnad,
1050*4882a593Smuzhiyun 				eeprom->offset, &base_offset);
1051*4882a593Smuzhiyun 	if (flash_part == 0)
1052*4882a593Smuzhiyun 		return -EFAULT;
1053*4882a593Smuzhiyun 
1054*4882a593Smuzhiyun 	fcomp.bnad = bnad;
1055*4882a593Smuzhiyun 	fcomp.comp_status = 0;
1056*4882a593Smuzhiyun 
1057*4882a593Smuzhiyun 	init_completion(&fcomp.comp);
1058*4882a593Smuzhiyun 	spin_lock_irqsave(&bnad->bna_lock, flags);
1059*4882a593Smuzhiyun 	ret = bfa_nw_flash_update_part(&bnad->bna.flash, flash_part,
1060*4882a593Smuzhiyun 				bnad->id, bytes, eeprom->len,
1061*4882a593Smuzhiyun 				eeprom->offset - base_offset,
1062*4882a593Smuzhiyun 				bnad_cb_completion, &fcomp);
1063*4882a593Smuzhiyun 	if (ret != BFA_STATUS_OK) {
1064*4882a593Smuzhiyun 		spin_unlock_irqrestore(&bnad->bna_lock, flags);
1065*4882a593Smuzhiyun 		goto done;
1066*4882a593Smuzhiyun 	}
1067*4882a593Smuzhiyun 
1068*4882a593Smuzhiyun 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
1069*4882a593Smuzhiyun 	wait_for_completion(&fcomp.comp);
1070*4882a593Smuzhiyun 	ret = fcomp.comp_status;
1071*4882a593Smuzhiyun done:
1072*4882a593Smuzhiyun 	return ret;
1073*4882a593Smuzhiyun }
1074*4882a593Smuzhiyun 
1075*4882a593Smuzhiyun static int
bnad_flash_device(struct net_device * netdev,struct ethtool_flash * eflash)1076*4882a593Smuzhiyun bnad_flash_device(struct net_device *netdev, struct ethtool_flash *eflash)
1077*4882a593Smuzhiyun {
1078*4882a593Smuzhiyun 	struct bnad *bnad = netdev_priv(netdev);
1079*4882a593Smuzhiyun 	struct bnad_iocmd_comp fcomp;
1080*4882a593Smuzhiyun 	const struct firmware *fw;
1081*4882a593Smuzhiyun 	int ret = 0;
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun 	ret = request_firmware(&fw, eflash->data, &bnad->pcidev->dev);
1084*4882a593Smuzhiyun 	if (ret) {
1085*4882a593Smuzhiyun 		netdev_err(netdev, "can't load firmware %s\n", eflash->data);
1086*4882a593Smuzhiyun 		goto out;
1087*4882a593Smuzhiyun 	}
1088*4882a593Smuzhiyun 
1089*4882a593Smuzhiyun 	fcomp.bnad = bnad;
1090*4882a593Smuzhiyun 	fcomp.comp_status = 0;
1091*4882a593Smuzhiyun 
1092*4882a593Smuzhiyun 	init_completion(&fcomp.comp);
1093*4882a593Smuzhiyun 	spin_lock_irq(&bnad->bna_lock);
1094*4882a593Smuzhiyun 	ret = bfa_nw_flash_update_part(&bnad->bna.flash, BFA_FLASH_PART_FWIMG,
1095*4882a593Smuzhiyun 				bnad->id, (u8 *)fw->data, fw->size, 0,
1096*4882a593Smuzhiyun 				bnad_cb_completion, &fcomp);
1097*4882a593Smuzhiyun 	if (ret != BFA_STATUS_OK) {
1098*4882a593Smuzhiyun 		netdev_warn(netdev, "flash update failed with err=%d\n", ret);
1099*4882a593Smuzhiyun 		ret = -EIO;
1100*4882a593Smuzhiyun 		spin_unlock_irq(&bnad->bna_lock);
1101*4882a593Smuzhiyun 		goto out;
1102*4882a593Smuzhiyun 	}
1103*4882a593Smuzhiyun 
1104*4882a593Smuzhiyun 	spin_unlock_irq(&bnad->bna_lock);
1105*4882a593Smuzhiyun 	wait_for_completion(&fcomp.comp);
1106*4882a593Smuzhiyun 	if (fcomp.comp_status != BFA_STATUS_OK) {
1107*4882a593Smuzhiyun 		ret = -EIO;
1108*4882a593Smuzhiyun 		netdev_warn(netdev,
1109*4882a593Smuzhiyun 			    "firmware image update failed with err=%d\n",
1110*4882a593Smuzhiyun 			    fcomp.comp_status);
1111*4882a593Smuzhiyun 	}
1112*4882a593Smuzhiyun out:
1113*4882a593Smuzhiyun 	release_firmware(fw);
1114*4882a593Smuzhiyun 	return ret;
1115*4882a593Smuzhiyun }
1116*4882a593Smuzhiyun 
/* ethtool entry points for bna devices; installed on the net_device by
 * bnad_set_ethtool_ops().
 */
static const struct ethtool_ops bnad_ethtool_ops = {
	/* Coalescing knobs this driver's set_coalesce handler supports. */
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_TX_MAX_FRAMES |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_drvinfo = bnad_get_drvinfo,
	.get_wol = bnad_get_wol,
	.get_link = ethtool_op_get_link,
	.get_coalesce = bnad_get_coalesce,
	.set_coalesce = bnad_set_coalesce,
	.get_ringparam = bnad_get_ringparam,
	.set_ringparam = bnad_set_ringparam,
	.get_pauseparam = bnad_get_pauseparam,
	.set_pauseparam = bnad_set_pauseparam,
	.get_strings = bnad_get_strings,
	.get_ethtool_stats = bnad_get_ethtool_stats,
	.get_sset_count = bnad_get_sset_count,
	.get_eeprom_len = bnad_get_eeprom_len,
	.get_eeprom = bnad_get_eeprom,
	.set_eeprom = bnad_set_eeprom,
	.flash_device = bnad_flash_device,
	.get_ts_info = ethtool_op_get_ts_info,
	.get_link_ksettings = bnad_get_link_ksettings,
	.set_link_ksettings = bnad_set_link_ksettings,
};
1141*4882a593Smuzhiyun 
/* Install the bna ethtool operations on @netdev. */
void
bnad_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &bnad_ethtool_ops;
}
1147