xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/cisco/enic/enic_ethtool.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Copyright 2013 Cisco Systems, Inc.  All rights reserved.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * This program is free software; you may redistribute it and/or modify
5*4882a593Smuzhiyun  * it under the terms of the GNU General Public License as published by
6*4882a593Smuzhiyun  * the Free Software Foundation; version 2 of the License.
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
9*4882a593Smuzhiyun  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
10*4882a593Smuzhiyun  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
11*4882a593Smuzhiyun  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
12*4882a593Smuzhiyun  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
13*4882a593Smuzhiyun  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
14*4882a593Smuzhiyun  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
15*4882a593Smuzhiyun  * SOFTWARE.
16*4882a593Smuzhiyun  *
17*4882a593Smuzhiyun  */
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include <linux/netdevice.h>
20*4882a593Smuzhiyun #include <linux/ethtool.h>
21*4882a593Smuzhiyun #include <linux/net_tstamp.h>
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #include "enic_res.h"
24*4882a593Smuzhiyun #include "enic.h"
25*4882a593Smuzhiyun #include "enic_dev.h"
26*4882a593Smuzhiyun #include "enic_clsf.h"
27*4882a593Smuzhiyun #include "vnic_rss.h"
28*4882a593Smuzhiyun #include "vnic_stats.h"
29*4882a593Smuzhiyun 
/* Maps an ethtool statistics string to the u64 slot that holds its value
 * inside one of the vnic_*_stats structures.
 */
struct enic_stat {
	char name[ETH_GSTRING_LEN];	/* string shown by "ethtool -S" */
	unsigned int index;		/* u64-sized offset into the stats struct */
};
34*4882a593Smuzhiyun 
/* Build an enic_stat entry for a member of struct vnic_tx_stats.  The
 * offsetof()/sizeof(u64) division converts the byte offset into a u64
 * array index, matching how enic_get_ethtool_stats() reads the values.
 */
#define ENIC_TX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_tx_stats, stat) / sizeof(u64) \
}

/* Same as ENIC_TX_STAT but for struct vnic_rx_stats members. */
#define ENIC_RX_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_rx_stats, stat) / sizeof(u64) \
}

/* Same pattern for the driver-maintained struct vnic_gen_stats. */
#define ENIC_GEN_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct vnic_gen_stats, stat) / sizeof(u64)\
}
49*4882a593Smuzhiyun 
/* Transmit-side counters exported via "ethtool -S". */
static const struct enic_stat enic_tx_stats[] = {
	ENIC_TX_STAT(tx_frames_ok),
	ENIC_TX_STAT(tx_unicast_frames_ok),
	ENIC_TX_STAT(tx_multicast_frames_ok),
	ENIC_TX_STAT(tx_broadcast_frames_ok),
	ENIC_TX_STAT(tx_bytes_ok),
	ENIC_TX_STAT(tx_unicast_bytes_ok),
	ENIC_TX_STAT(tx_multicast_bytes_ok),
	ENIC_TX_STAT(tx_broadcast_bytes_ok),
	ENIC_TX_STAT(tx_drops),
	ENIC_TX_STAT(tx_errors),
	ENIC_TX_STAT(tx_tso),
};
63*4882a593Smuzhiyun 
/* Receive-side counters exported via "ethtool -S". */
static const struct enic_stat enic_rx_stats[] = {
	ENIC_RX_STAT(rx_frames_ok),
	ENIC_RX_STAT(rx_frames_total),
	ENIC_RX_STAT(rx_unicast_frames_ok),
	ENIC_RX_STAT(rx_multicast_frames_ok),
	ENIC_RX_STAT(rx_broadcast_frames_ok),
	ENIC_RX_STAT(rx_bytes_ok),
	ENIC_RX_STAT(rx_unicast_bytes_ok),
	ENIC_RX_STAT(rx_multicast_bytes_ok),
	ENIC_RX_STAT(rx_broadcast_bytes_ok),
	ENIC_RX_STAT(rx_drop),
	ENIC_RX_STAT(rx_no_bufs),
	ENIC_RX_STAT(rx_errors),
	ENIC_RX_STAT(rx_rss),
	ENIC_RX_STAT(rx_crc_errors),
	ENIC_RX_STAT(rx_frames_64),
	ENIC_RX_STAT(rx_frames_127),
	ENIC_RX_STAT(rx_frames_255),
	ENIC_RX_STAT(rx_frames_511),
	ENIC_RX_STAT(rx_frames_1023),
	ENIC_RX_STAT(rx_frames_1518),
	ENIC_RX_STAT(rx_frames_to_max),
};
87*4882a593Smuzhiyun 
/* Driver-generated (not adapter-reported) counters. */
static const struct enic_stat enic_gen_stats[] = {
	ENIC_GEN_STAT(dma_map_error),
};

/* Table sizes, cached so the string/count/dump callbacks agree. */
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
static const unsigned int enic_n_gen_stats = ARRAY_SIZE(enic_gen_stats);
95*4882a593Smuzhiyun 
/* Program the same RX coalescing timer on the interrupt of every
 * receive queue.
 */
static void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
{
	int rq;

	for (rq = 0; rq < enic->rq_count; rq++) {
		int intr_idx = enic_msix_rq_intr(enic, rq);

		vnic_intr_coalescing_timer_set(&enic->intr[intr_idx], timer);
	}
}
106*4882a593Smuzhiyun 
enic_get_ksettings(struct net_device * netdev,struct ethtool_link_ksettings * ecmd)107*4882a593Smuzhiyun static int enic_get_ksettings(struct net_device *netdev,
108*4882a593Smuzhiyun 			      struct ethtool_link_ksettings *ecmd)
109*4882a593Smuzhiyun {
110*4882a593Smuzhiyun 	struct enic *enic = netdev_priv(netdev);
111*4882a593Smuzhiyun 	struct ethtool_link_settings *base = &ecmd->base;
112*4882a593Smuzhiyun 
113*4882a593Smuzhiyun 	ethtool_link_ksettings_add_link_mode(ecmd, supported,
114*4882a593Smuzhiyun 					     10000baseT_Full);
115*4882a593Smuzhiyun 	ethtool_link_ksettings_add_link_mode(ecmd, supported, FIBRE);
116*4882a593Smuzhiyun 	ethtool_link_ksettings_add_link_mode(ecmd, advertising,
117*4882a593Smuzhiyun 					     10000baseT_Full);
118*4882a593Smuzhiyun 	ethtool_link_ksettings_add_link_mode(ecmd, advertising, FIBRE);
119*4882a593Smuzhiyun 	base->port = PORT_FIBRE;
120*4882a593Smuzhiyun 
121*4882a593Smuzhiyun 	if (netif_carrier_ok(netdev)) {
122*4882a593Smuzhiyun 		base->speed = vnic_dev_port_speed(enic->vdev);
123*4882a593Smuzhiyun 		base->duplex = DUPLEX_FULL;
124*4882a593Smuzhiyun 	} else {
125*4882a593Smuzhiyun 		base->speed = SPEED_UNKNOWN;
126*4882a593Smuzhiyun 		base->duplex = DUPLEX_UNKNOWN;
127*4882a593Smuzhiyun 	}
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun 	base->autoneg = AUTONEG_DISABLE;
130*4882a593Smuzhiyun 
131*4882a593Smuzhiyun 	return 0;
132*4882a593Smuzhiyun }
133*4882a593Smuzhiyun 
/* ethtool .get_drvinfo: report driver name, firmware version and PCI
 * bus address.
 *
 * enic_dev_fw_info() may fail; only -ENOMEM means the fw_info buffer
 * itself could not be allocated, in which case fw_info is unusable and
 * we must leave drvinfo untouched.  Any other error still leaves the
 * previously recorded firmware info available, so we report that.
 */
static void enic_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_devcmd_fw_info *fw_info;
	int err;

	err = enic_dev_fw_info(enic, &fw_info);
	/* return only when pci_zalloc_consistent fails in vnic_dev_fw_info
	 * For other failures, like devcmd failure, we return previously
	 * recorded info.
	 */
	if (err == -ENOMEM)
		return;

	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->fw_version, fw_info->fw_version,
		sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(enic->pdev),
		sizeof(drvinfo->bus_info));
}
155*4882a593Smuzhiyun 
/* ethtool .get_strings: emit the stat names for ETH_SS_STATS in the
 * same order enic_get_ethtool_stats() emits the values (tx, rx, gen).
 */
static void enic_get_strings(struct net_device *netdev, u32 stringset,
	u8 *data)
{
	u8 *p = data;
	unsigned int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < enic_n_tx_stats; i++, p += ETH_GSTRING_LEN)
		memcpy(p, enic_tx_stats[i].name, ETH_GSTRING_LEN);
	for (i = 0; i < enic_n_rx_stats; i++, p += ETH_GSTRING_LEN)
		memcpy(p, enic_rx_stats[i].name, ETH_GSTRING_LEN);
	for (i = 0; i < enic_n_gen_stats; i++, p += ETH_GSTRING_LEN)
		memcpy(p, enic_gen_stats[i].name, ETH_GSTRING_LEN);
}
178*4882a593Smuzhiyun 
enic_get_ringparam(struct net_device * netdev,struct ethtool_ringparam * ring)179*4882a593Smuzhiyun static void enic_get_ringparam(struct net_device *netdev,
180*4882a593Smuzhiyun 			       struct ethtool_ringparam *ring)
181*4882a593Smuzhiyun {
182*4882a593Smuzhiyun 	struct enic *enic = netdev_priv(netdev);
183*4882a593Smuzhiyun 	struct vnic_enet_config *c = &enic->config;
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun 	ring->rx_max_pending = ENIC_MAX_RQ_DESCS;
186*4882a593Smuzhiyun 	ring->rx_pending = c->rq_desc_count;
187*4882a593Smuzhiyun 	ring->tx_max_pending = ENIC_MAX_WQ_DESCS;
188*4882a593Smuzhiyun 	ring->tx_pending = c->wq_desc_count;
189*4882a593Smuzhiyun }
190*4882a593Smuzhiyun 
/* ethtool .set_ringparam: resize the RX/TX descriptor rings.
 *
 * Mini and jumbo rings are not supported, and the requested sizes must
 * fall within [ENIC_MIN_*_DESCS, ENIC_MAX_*_DESCS].  If the interface
 * is running it is closed, the vNIC resources are freed and
 * re-allocated with the new counts, and the interface is re-opened.
 * On failure the previous descriptor counts are restored in the config
 * (though the device may be left closed if re-allocation failed).
 */
static int enic_set_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_enet_config *c = &enic->config;
	int running = netif_running(netdev);
	unsigned int rx_pending;
	unsigned int tx_pending;
	int err = 0;

	if (ring->rx_mini_max_pending || ring->rx_mini_pending) {
		netdev_info(netdev,
			    "modifying mini ring params is not supported");
		return -EINVAL;
	}
	if (ring->rx_jumbo_max_pending || ring->rx_jumbo_pending) {
		netdev_info(netdev,
			    "modifying jumbo ring params is not supported");
		return -EINVAL;
	}
	/* remember the current counts so they can be restored on error */
	rx_pending = c->rq_desc_count;
	tx_pending = c->wq_desc_count;
	if (ring->rx_pending > ENIC_MAX_RQ_DESCS ||
	    ring->rx_pending < ENIC_MIN_RQ_DESCS) {
		netdev_info(netdev, "rx pending (%u) not in range [%u,%u]",
			    ring->rx_pending, ENIC_MIN_RQ_DESCS,
			    ENIC_MAX_RQ_DESCS);
		return -EINVAL;
	}
	if (ring->tx_pending > ENIC_MAX_WQ_DESCS ||
	    ring->tx_pending < ENIC_MIN_WQ_DESCS) {
		netdev_info(netdev, "tx pending (%u) not in range [%u,%u]",
			    ring->tx_pending, ENIC_MIN_WQ_DESCS,
			    ENIC_MAX_WQ_DESCS);
		return -EINVAL;
	}
	if (running)
		dev_close(netdev);
	c->rq_desc_count =
		ring->rx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
	c->wq_desc_count =
		ring->tx_pending & 0xffffffe0; /* must be aligned to groups of 32 */
	enic_free_vnic_resources(enic);
	err = enic_alloc_vnic_resources(enic);
	if (err) {
		netdev_err(netdev,
			   "Failed to alloc vNIC resources, aborting\n");
		enic_free_vnic_resources(enic);
		goto err_out;
	}
	enic_init_vnic_resources(enic);
	if (running) {
		err = dev_open(netdev, NULL);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	/* roll back the descriptor counts recorded above */
	c->rq_desc_count = rx_pending;
	c->wq_desc_count = tx_pending;
	return err;
}
253*4882a593Smuzhiyun 
enic_get_sset_count(struct net_device * netdev,int sset)254*4882a593Smuzhiyun static int enic_get_sset_count(struct net_device *netdev, int sset)
255*4882a593Smuzhiyun {
256*4882a593Smuzhiyun 	switch (sset) {
257*4882a593Smuzhiyun 	case ETH_SS_STATS:
258*4882a593Smuzhiyun 		return enic_n_tx_stats + enic_n_rx_stats + enic_n_gen_stats;
259*4882a593Smuzhiyun 	default:
260*4882a593Smuzhiyun 		return -EOPNOTSUPP;
261*4882a593Smuzhiyun 	}
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun 
/* ethtool .get_ethtool_stats: dump tx, rx and driver-generated counters
 * in the order matching enic_get_strings().
 */
static void enic_get_ethtool_stats(struct net_device *netdev,
	struct ethtool_stats *stats, u64 *data)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_stats *vstats;
	unsigned int i, k = 0;
	int err;

	err = enic_dev_stats_dump(enic, &vstats);
	/* Bail out only when the stats DMA buffer itself could not be
	 * allocated (-ENOMEM); for other failures, like devcmd failure,
	 * vstats still holds previously recorded stats.
	 */
	if (err == -ENOMEM)
		return;

	for (i = 0; i < enic_n_tx_stats; i++)
		data[k++] = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
	for (i = 0; i < enic_n_rx_stats; i++)
		data[k++] = ((u64 *)&vstats->rx)[enic_rx_stats[i].index];
	for (i = 0; i < enic_n_gen_stats; i++)
		data[k++] = ((u64 *)&enic->gen_stats)[enic_gen_stats[i].index];
}
287*4882a593Smuzhiyun 
enic_get_msglevel(struct net_device * netdev)288*4882a593Smuzhiyun static u32 enic_get_msglevel(struct net_device *netdev)
289*4882a593Smuzhiyun {
290*4882a593Smuzhiyun 	struct enic *enic = netdev_priv(netdev);
291*4882a593Smuzhiyun 	return enic->msg_enable;
292*4882a593Smuzhiyun }
293*4882a593Smuzhiyun 
/* ethtool .set_msglevel: store the driver's message-enable bitmap. */
static void enic_set_msglevel(struct net_device *netdev, u32 value)
{
	struct enic *priv = netdev_priv(netdev);

	priv->msg_enable = value;
}
299*4882a593Smuzhiyun 
enic_get_coalesce(struct net_device * netdev,struct ethtool_coalesce * ecmd)300*4882a593Smuzhiyun static int enic_get_coalesce(struct net_device *netdev,
301*4882a593Smuzhiyun 	struct ethtool_coalesce *ecmd)
302*4882a593Smuzhiyun {
303*4882a593Smuzhiyun 	struct enic *enic = netdev_priv(netdev);
304*4882a593Smuzhiyun 	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun 	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
307*4882a593Smuzhiyun 		ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
308*4882a593Smuzhiyun 	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
309*4882a593Smuzhiyun 	if (rxcoal->use_adaptive_rx_coalesce)
310*4882a593Smuzhiyun 		ecmd->use_adaptive_rx_coalesce = 1;
311*4882a593Smuzhiyun 	ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
312*4882a593Smuzhiyun 	ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
313*4882a593Smuzhiyun 
314*4882a593Smuzhiyun 	return 0;
315*4882a593Smuzhiyun }
316*4882a593Smuzhiyun 
/* Validate a coalescing request before enic_set_coalesce() applies it.
 *
 * Rejects (-EINVAL):
 *  - a non-zero tx_coalesce_usecs when the device is not in MSI-X mode;
 *  - a non-zero rx_coalesce_usecs_high that, after clamping to the
 *    adapter maximum, leaves less than ENIC_AIC_LARGE_PKT_DIFF of room
 *    above the (clamped) rx_coalesce_usecs_low.
 *
 * Values above the adapter's maximum coalescing timer are not errors:
 * a message is logged here and the values are clamped by the caller.
 */
static int enic_coalesce_valid(struct enic *enic,
			       struct ethtool_coalesce *ec)
{
	u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
					   ec->rx_coalesce_usecs_high);
	u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
					  ec->rx_coalesce_usecs_low);

	if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
	    ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_low > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
		netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
			    coalesce_usecs_max);

	if (ec->rx_coalesce_usecs_high &&
	    (rx_coalesce_usecs_high <
	     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
		return -EINVAL;

	return 0;
}
344*4882a593Smuzhiyun 
/* ethtool .set_coalesce: apply interrupt coalescing settings.
 *
 * After validation by enic_coalesce_valid(), all requested values are
 * clamped to the adapter's maximum coalescing timer.  In MSI-X mode the
 * TX timer is programmed on every work-queue interrupt.  The RX timer
 * is only programmed directly when adaptive RX coalescing is off;
 * otherwise the adaptive range (small_pkt_range_start .. range_end) is
 * updated from the usecs_low/usecs_high values when usecs_high is
 * non-zero.
 */
static int enic_set_coalesce(struct net_device *netdev,
	struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_usecs_low;
	u32 rx_coalesce_usecs_high;
	u32 coalesce_usecs_max;
	unsigned int i, intr;
	int ret;
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	ret = enic_coalesce_valid(enic, ecmd);
	if (ret)
		return ret;
	/* clamp every requested value to the adapter maximum */
	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
				  coalesce_usecs_max);
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
				  coalesce_usecs_max);

	rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
				      coalesce_usecs_max);
	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
				       coalesce_usecs_max);

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
						       tx_coalesce_usecs);
		}
		enic->tx_coalesce_usecs = tx_coalesce_usecs;
	}
	rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
	if (!rxcoal->use_adaptive_rx_coalesce)
		enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
	if (ecmd->rx_coalesce_usecs_high) {
		rxcoal->range_end = rx_coalesce_usecs_high;
		rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
		rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
						ENIC_AIC_LARGE_PKT_DIFF;
	}

	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}
394*4882a593Smuzhiyun 
/* ETHTOOL_GRXCLSRLALL helper: fill rule_locs[] with the IDs of all
 * installed RX flow-steering filters.  The caller (enic_get_rxnfc)
 * holds enic->rfs_h.lock while this walks the filter hash table.
 *
 * Returns -EMSGSIZE if there are more filters than cmd->rule_cnt can
 * hold; otherwise 0 with cmd->rule_cnt set to the number written.
 */
static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int j, ret = 0, cnt = 0;

	/* number of filters currently in use */
	cmd->data = enic->rfs_h.max - enic->rfs_h.free;
	for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[j];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			if (cnt == cmd->rule_cnt)
				return -EMSGSIZE;
			rule_locs[cnt] = n->fltr_id;
			cnt++;
		}
	}
	cmd->rule_cnt = cnt;

	return ret;
}
418*4882a593Smuzhiyun 
enic_grxclsrule(struct enic * enic,struct ethtool_rxnfc * cmd)419*4882a593Smuzhiyun static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
420*4882a593Smuzhiyun {
421*4882a593Smuzhiyun 	struct ethtool_rx_flow_spec *fsp =
422*4882a593Smuzhiyun 				(struct ethtool_rx_flow_spec *)&cmd->fs;
423*4882a593Smuzhiyun 	struct enic_rfs_fltr_node *n;
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	n = htbl_fltr_search(enic, (u16)fsp->location);
426*4882a593Smuzhiyun 	if (!n)
427*4882a593Smuzhiyun 		return -EINVAL;
428*4882a593Smuzhiyun 	switch (n->keys.basic.ip_proto) {
429*4882a593Smuzhiyun 	case IPPROTO_TCP:
430*4882a593Smuzhiyun 		fsp->flow_type = TCP_V4_FLOW;
431*4882a593Smuzhiyun 		break;
432*4882a593Smuzhiyun 	case IPPROTO_UDP:
433*4882a593Smuzhiyun 		fsp->flow_type = UDP_V4_FLOW;
434*4882a593Smuzhiyun 		break;
435*4882a593Smuzhiyun 	default:
436*4882a593Smuzhiyun 		return -EINVAL;
437*4882a593Smuzhiyun 	}
438*4882a593Smuzhiyun 
439*4882a593Smuzhiyun 	fsp->h_u.tcp_ip4_spec.ip4src = flow_get_u32_src(&n->keys);
440*4882a593Smuzhiyun 	fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
441*4882a593Smuzhiyun 
442*4882a593Smuzhiyun 	fsp->h_u.tcp_ip4_spec.ip4dst = flow_get_u32_dst(&n->keys);
443*4882a593Smuzhiyun 	fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
444*4882a593Smuzhiyun 
445*4882a593Smuzhiyun 	fsp->h_u.tcp_ip4_spec.psrc = n->keys.ports.src;
446*4882a593Smuzhiyun 	fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
447*4882a593Smuzhiyun 
448*4882a593Smuzhiyun 	fsp->h_u.tcp_ip4_spec.pdst = n->keys.ports.dst;
449*4882a593Smuzhiyun 	fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun 	fsp->ring_cookie = n->rq_id;
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 	return 0;
454*4882a593Smuzhiyun }
455*4882a593Smuzhiyun 
enic_get_rx_flow_hash(struct enic * enic,struct ethtool_rxnfc * cmd)456*4882a593Smuzhiyun static int enic_get_rx_flow_hash(struct enic *enic, struct ethtool_rxnfc *cmd)
457*4882a593Smuzhiyun {
458*4882a593Smuzhiyun 	u8 rss_hash_type = 0;
459*4882a593Smuzhiyun 	cmd->data = 0;
460*4882a593Smuzhiyun 
461*4882a593Smuzhiyun 	spin_lock_bh(&enic->devcmd_lock);
462*4882a593Smuzhiyun 	(void)vnic_dev_capable_rss_hash_type(enic->vdev, &rss_hash_type);
463*4882a593Smuzhiyun 	spin_unlock_bh(&enic->devcmd_lock);
464*4882a593Smuzhiyun 	switch (cmd->flow_type) {
465*4882a593Smuzhiyun 	case TCP_V6_FLOW:
466*4882a593Smuzhiyun 	case TCP_V4_FLOW:
467*4882a593Smuzhiyun 		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3 |
468*4882a593Smuzhiyun 			     RXH_IP_SRC | RXH_IP_DST;
469*4882a593Smuzhiyun 		break;
470*4882a593Smuzhiyun 	case UDP_V6_FLOW:
471*4882a593Smuzhiyun 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
472*4882a593Smuzhiyun 		if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV6)
473*4882a593Smuzhiyun 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
474*4882a593Smuzhiyun 		break;
475*4882a593Smuzhiyun 	case UDP_V4_FLOW:
476*4882a593Smuzhiyun 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
477*4882a593Smuzhiyun 		if (rss_hash_type & NIC_CFG_RSS_HASH_TYPE_UDP_IPV4)
478*4882a593Smuzhiyun 			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
479*4882a593Smuzhiyun 		break;
480*4882a593Smuzhiyun 	case SCTP_V4_FLOW:
481*4882a593Smuzhiyun 	case AH_ESP_V4_FLOW:
482*4882a593Smuzhiyun 	case AH_V4_FLOW:
483*4882a593Smuzhiyun 	case ESP_V4_FLOW:
484*4882a593Smuzhiyun 	case SCTP_V6_FLOW:
485*4882a593Smuzhiyun 	case AH_ESP_V6_FLOW:
486*4882a593Smuzhiyun 	case AH_V6_FLOW:
487*4882a593Smuzhiyun 	case ESP_V6_FLOW:
488*4882a593Smuzhiyun 	case IPV4_FLOW:
489*4882a593Smuzhiyun 	case IPV6_FLOW:
490*4882a593Smuzhiyun 		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
491*4882a593Smuzhiyun 		break;
492*4882a593Smuzhiyun 	default:
493*4882a593Smuzhiyun 		return -EINVAL;
494*4882a593Smuzhiyun 	}
495*4882a593Smuzhiyun 
496*4882a593Smuzhiyun 	return 0;
497*4882a593Smuzhiyun }
498*4882a593Smuzhiyun 
/* ethtool .get_rxnfc: dispatch RX network-flow-classification queries.
 * The rfs_h.lock protects the flow-steering filter table while it is
 * counted or walked.
 */
static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			  u32 *rule_locs)
{
	struct enic *enic = netdev_priv(dev);
	int ret;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = enic->rq_count;
		return 0;
	case ETHTOOL_GRXCLSRLCNT:
		spin_lock_bh(&enic->rfs_h.lock);
		cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
		cmd->data = enic->rfs_h.max;
		spin_unlock_bh(&enic->rfs_h.lock);
		return 0;
	case ETHTOOL_GRXCLSRLALL:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrlall(enic, cmd, rule_locs);
		spin_unlock_bh(&enic->rfs_h.lock);
		return ret;
	case ETHTOOL_GRXCLSRULE:
		spin_lock_bh(&enic->rfs_h.lock);
		ret = enic_grxclsrule(enic, cmd);
		spin_unlock_bh(&enic->rfs_h.lock);
		return ret;
	case ETHTOOL_GRXFH:
		return enic_get_rx_flow_hash(enic, cmd);
	default:
		return -EOPNOTSUPP;
	}
}
535*4882a593Smuzhiyun 
enic_get_tunable(struct net_device * dev,const struct ethtool_tunable * tuna,void * data)536*4882a593Smuzhiyun static int enic_get_tunable(struct net_device *dev,
537*4882a593Smuzhiyun 			    const struct ethtool_tunable *tuna, void *data)
538*4882a593Smuzhiyun {
539*4882a593Smuzhiyun 	struct enic *enic = netdev_priv(dev);
540*4882a593Smuzhiyun 	int ret = 0;
541*4882a593Smuzhiyun 
542*4882a593Smuzhiyun 	switch (tuna->id) {
543*4882a593Smuzhiyun 	case ETHTOOL_RX_COPYBREAK:
544*4882a593Smuzhiyun 		*(u32 *)data = enic->rx_copybreak;
545*4882a593Smuzhiyun 		break;
546*4882a593Smuzhiyun 	default:
547*4882a593Smuzhiyun 		ret = -EINVAL;
548*4882a593Smuzhiyun 		break;
549*4882a593Smuzhiyun 	}
550*4882a593Smuzhiyun 
551*4882a593Smuzhiyun 	return ret;
552*4882a593Smuzhiyun }
553*4882a593Smuzhiyun 
enic_set_tunable(struct net_device * dev,const struct ethtool_tunable * tuna,const void * data)554*4882a593Smuzhiyun static int enic_set_tunable(struct net_device *dev,
555*4882a593Smuzhiyun 			    const struct ethtool_tunable *tuna,
556*4882a593Smuzhiyun 			    const void *data)
557*4882a593Smuzhiyun {
558*4882a593Smuzhiyun 	struct enic *enic = netdev_priv(dev);
559*4882a593Smuzhiyun 	int ret = 0;
560*4882a593Smuzhiyun 
561*4882a593Smuzhiyun 	switch (tuna->id) {
562*4882a593Smuzhiyun 	case ETHTOOL_RX_COPYBREAK:
563*4882a593Smuzhiyun 		enic->rx_copybreak = *(u32 *)data;
564*4882a593Smuzhiyun 		break;
565*4882a593Smuzhiyun 	default:
566*4882a593Smuzhiyun 		ret = -EINVAL;
567*4882a593Smuzhiyun 		break;
568*4882a593Smuzhiyun 	}
569*4882a593Smuzhiyun 
570*4882a593Smuzhiyun 	return ret;
571*4882a593Smuzhiyun }
572*4882a593Smuzhiyun 
/* ethtool .get_rxfh_key_size: the RSS hash key is ENIC_RSS_LEN bytes. */
static u32 enic_get_rxfh_key_size(struct net_device *netdev)
{
	return ENIC_RSS_LEN;
}
577*4882a593Smuzhiyun 
/* ethtool .get_rxfh: report the RSS hash key and hash function.  The
 * indirection table (indir) is not filled in by this driver.
 */
static int enic_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey,
			 u8 *hfunc)
{
	struct enic *enic = netdev_priv(netdev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (hkey)
		memcpy(hkey, enic->rss_key, ENIC_RSS_LEN);

	return 0;
}
591*4882a593Smuzhiyun 
enic_set_rxfh(struct net_device * netdev,const u32 * indir,const u8 * hkey,const u8 hfunc)592*4882a593Smuzhiyun static int enic_set_rxfh(struct net_device *netdev, const u32 *indir,
593*4882a593Smuzhiyun 			 const u8 *hkey, const u8 hfunc)
594*4882a593Smuzhiyun {
595*4882a593Smuzhiyun 	struct enic *enic = netdev_priv(netdev);
596*4882a593Smuzhiyun 
597*4882a593Smuzhiyun 	if ((hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) ||
598*4882a593Smuzhiyun 	    indir)
599*4882a593Smuzhiyun 		return -EINVAL;
600*4882a593Smuzhiyun 
601*4882a593Smuzhiyun 	if (hkey)
602*4882a593Smuzhiyun 		memcpy(enic->rss_key, hkey, ENIC_RSS_LEN);
603*4882a593Smuzhiyun 
604*4882a593Smuzhiyun 	return __enic_set_rsskey(enic);
605*4882a593Smuzhiyun }
606*4882a593Smuzhiyun 
enic_get_ts_info(struct net_device * netdev,struct ethtool_ts_info * info)607*4882a593Smuzhiyun static int enic_get_ts_info(struct net_device *netdev,
608*4882a593Smuzhiyun 			    struct ethtool_ts_info *info)
609*4882a593Smuzhiyun {
610*4882a593Smuzhiyun 	info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
611*4882a593Smuzhiyun 				SOF_TIMESTAMPING_RX_SOFTWARE |
612*4882a593Smuzhiyun 				SOF_TIMESTAMPING_SOFTWARE;
613*4882a593Smuzhiyun 
614*4882a593Smuzhiyun 	return 0;
615*4882a593Smuzhiyun }
616*4882a593Smuzhiyun 
/* ethtool operations table; installed by enic_set_ethtool_ops().
 * supported_coalesce_params lists the coalescing fields the ethtool
 * core allows through to enic_set_coalesce().
 */
static const struct ethtool_ops enic_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_RX_USECS_LOW |
				     ETHTOOL_COALESCE_RX_USECS_HIGH,
	.get_drvinfo = enic_get_drvinfo,
	.get_msglevel = enic_get_msglevel,
	.set_msglevel = enic_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_strings = enic_get_strings,
	.get_ringparam = enic_get_ringparam,
	.set_ringparam = enic_set_ringparam,
	.get_sset_count = enic_get_sset_count,
	.get_ethtool_stats = enic_get_ethtool_stats,
	.get_coalesce = enic_get_coalesce,
	.set_coalesce = enic_set_coalesce,
	.get_rxnfc = enic_get_rxnfc,
	.get_tunable = enic_get_tunable,
	.set_tunable = enic_set_tunable,
	.get_rxfh_key_size = enic_get_rxfh_key_size,
	.get_rxfh = enic_get_rxfh,
	.set_rxfh = enic_set_rxfh,
	.get_link_ksettings = enic_get_ksettings,
	.get_ts_info = enic_get_ts_info,
};
642*4882a593Smuzhiyun 
/* Hook this driver's ethtool operations onto a net_device. */
void enic_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &enic_ethtool_ops;
}
647