// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2006 Intel Corporation. */

/* ethtool support for e1000 */
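/*
 * These handlers back the standard ethtool operations for this driver,
 * e.g. "ethtool ethX" (link settings), "ethtool -S ethX" (statistics),
 * "ethtool -e/-E ethX" (EEPROM access), "ethtool -d ethX" (register dump)
 * and "ethtool -t ethX offline" (self-test).
 */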

#include "e1000.h"
#include <linux/jiffies.h>
#include <linux/uaccess.h>

enum {NETDEV_STATS, E1000_STATS};

struct e1000_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define E1000_STAT(m)		E1000_STATS, \
				sizeof(((struct e1000_adapter *)0)->m), \
				offsetof(struct e1000_adapter, m)
#define E1000_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct net_device *)0)->m), \
				offsetof(struct net_device, m)

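/* Statistics exported via "ethtool -S": each entry names the counter and
 * records whether it is read from the adapter's hardware stats or from the
 * generic net_device stats, along with its size and offset there.
 */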
static const struct e1000_stats e1000_gstrings_stats[] = {
	{ "rx_packets", E1000_STAT(stats.gprc) },
	{ "tx_packets", E1000_STAT(stats.gptc) },
	{ "rx_bytes", E1000_STAT(stats.gorcl) },
	{ "tx_bytes", E1000_STAT(stats.gotcl) },
	{ "rx_broadcast", E1000_STAT(stats.bprc) },
	{ "tx_broadcast", E1000_STAT(stats.bptc) },
	{ "rx_multicast", E1000_STAT(stats.mprc) },
	{ "tx_multicast", E1000_STAT(stats.mptc) },
	{ "rx_errors", E1000_STAT(stats.rxerrc) },
	{ "tx_errors", E1000_STAT(stats.txerrc) },
	{ "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
	{ "multicast", E1000_STAT(stats.mprc) },
	{ "collisions", E1000_STAT(stats.colc) },
	{ "rx_length_errors", E1000_STAT(stats.rlerrc) },
	{ "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
	{ "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
	{ "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
	{ "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
	{ "tx_window_errors", E1000_STAT(stats.latecol) },
	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
	{ "tx_restart_queue", E1000_STAT(restart_queue) },
	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
	{ "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
	{ "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
	{ "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
	{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
	{ "rx_long_byte_count", E1000_STAT(stats.gorcl) },
	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
	{ "tx_smbus", E1000_STAT(stats.mgptc) },
	{ "rx_smbus", E1000_STAT(stats.mgprc) },
	{ "dropped_smbus", E1000_STAT(stats.mgpdc) },
};

#define E1000_QUEUE_STATS_LEN 0
#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};
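/* Self-test results are reported in the same order as the strings above. */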

#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
85*4882a593Smuzhiyun
e1000_get_link_ksettings(struct net_device * netdev,struct ethtool_link_ksettings * cmd)86*4882a593Smuzhiyun static int e1000_get_link_ksettings(struct net_device *netdev,
87*4882a593Smuzhiyun struct ethtool_link_ksettings *cmd)
88*4882a593Smuzhiyun {
89*4882a593Smuzhiyun struct e1000_adapter *adapter = netdev_priv(netdev);
90*4882a593Smuzhiyun struct e1000_hw *hw = &adapter->hw;
91*4882a593Smuzhiyun u32 supported, advertising;
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun if (hw->media_type == e1000_media_type_copper) {
94*4882a593Smuzhiyun supported = (SUPPORTED_10baseT_Half |
95*4882a593Smuzhiyun SUPPORTED_10baseT_Full |
96*4882a593Smuzhiyun SUPPORTED_100baseT_Half |
97*4882a593Smuzhiyun SUPPORTED_100baseT_Full |
98*4882a593Smuzhiyun SUPPORTED_1000baseT_Full|
99*4882a593Smuzhiyun SUPPORTED_Autoneg |
100*4882a593Smuzhiyun SUPPORTED_TP);
101*4882a593Smuzhiyun advertising = ADVERTISED_TP;
102*4882a593Smuzhiyun
103*4882a593Smuzhiyun if (hw->autoneg == 1) {
104*4882a593Smuzhiyun advertising |= ADVERTISED_Autoneg;
105*4882a593Smuzhiyun /* the e1000 autoneg seems to match ethtool nicely */
106*4882a593Smuzhiyun advertising |= hw->autoneg_advertised;
107*4882a593Smuzhiyun }
108*4882a593Smuzhiyun
109*4882a593Smuzhiyun cmd->base.port = PORT_TP;
110*4882a593Smuzhiyun cmd->base.phy_address = hw->phy_addr;
111*4882a593Smuzhiyun } else {
112*4882a593Smuzhiyun supported = (SUPPORTED_1000baseT_Full |
113*4882a593Smuzhiyun SUPPORTED_FIBRE |
114*4882a593Smuzhiyun SUPPORTED_Autoneg);
115*4882a593Smuzhiyun
116*4882a593Smuzhiyun advertising = (ADVERTISED_1000baseT_Full |
117*4882a593Smuzhiyun ADVERTISED_FIBRE |
118*4882a593Smuzhiyun ADVERTISED_Autoneg);
119*4882a593Smuzhiyun
120*4882a593Smuzhiyun cmd->base.port = PORT_FIBRE;
121*4882a593Smuzhiyun }
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun if (er32(STATUS) & E1000_STATUS_LU) {
124*4882a593Smuzhiyun e1000_get_speed_and_duplex(hw, &adapter->link_speed,
125*4882a593Smuzhiyun &adapter->link_duplex);
126*4882a593Smuzhiyun cmd->base.speed = adapter->link_speed;
127*4882a593Smuzhiyun
128*4882a593Smuzhiyun /* unfortunately FULL_DUPLEX != DUPLEX_FULL
129*4882a593Smuzhiyun * and HALF_DUPLEX != DUPLEX_HALF
130*4882a593Smuzhiyun */
131*4882a593Smuzhiyun if (adapter->link_duplex == FULL_DUPLEX)
132*4882a593Smuzhiyun cmd->base.duplex = DUPLEX_FULL;
133*4882a593Smuzhiyun else
134*4882a593Smuzhiyun cmd->base.duplex = DUPLEX_HALF;
135*4882a593Smuzhiyun } else {
136*4882a593Smuzhiyun cmd->base.speed = SPEED_UNKNOWN;
137*4882a593Smuzhiyun cmd->base.duplex = DUPLEX_UNKNOWN;
138*4882a593Smuzhiyun }
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun cmd->base.autoneg = ((hw->media_type == e1000_media_type_fiber) ||
141*4882a593Smuzhiyun hw->autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
142*4882a593Smuzhiyun
	/* MDI-X => 2; MDI => 1; Invalid => 0 */
	if ((hw->media_type == e1000_media_type_copper) &&
	    netif_carrier_ok(netdev))
		cmd->base.eth_tp_mdix = (!!adapter->phy_info.mdix_mode ?
					 ETH_TP_MDI_X : ETH_TP_MDI);
	else
		cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;

	if (hw->mdix == AUTO_ALL_MODES)
		cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
	else
		cmd->base.eth_tp_mdix_ctrl = hw->mdix;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}

static int e1000_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	/* MDI setting is only allowed when autoneg enabled because
	 * some hardware doesn't allow MDI setting when speed or
	 * duplex is forced.
	 */
	if (cmd->base.eth_tp_mdix_ctrl) {
		if (hw->media_type != e1000_media_type_copper)
			return -EOPNOTSUPP;

		if ((cmd->base.eth_tp_mdix_ctrl != ETH_TP_MDI_AUTO) &&
		    (cmd->base.autoneg != AUTONEG_ENABLE)) {
			e_err(drv, "forcing MDI/MDI-X state is not supported when link speed and/or duplex are forced\n");
			return -EINVAL;
		}
	}

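	/* Wait for any reset already in progress to finish before
	 * reconfiguring the link.
	 */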
	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	if (cmd->base.autoneg == AUTONEG_ENABLE) {
		hw->autoneg = 1;
		if (hw->media_type == e1000_media_type_fiber)
			hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
						 ADVERTISED_FIBRE |
						 ADVERTISED_Autoneg;
		else
			hw->autoneg_advertised = advertising |
						 ADVERTISED_TP |
						 ADVERTISED_Autoneg;
	} else {
		u32 speed = cmd->base.speed;
		/* calling this overrides forced MDI setting */
		if (e1000_set_spd_dplx(adapter, speed, cmd->base.duplex)) {
			clear_bit(__E1000_RESETTING, &adapter->flags);
			return -EINVAL;
		}
	}

	/* MDI-X => 2; MDI => 1; Auto => 3 */
	if (cmd->base.eth_tp_mdix_ctrl) {
		if (cmd->base.eth_tp_mdix_ctrl == ETH_TP_MDI_AUTO)
			hw->mdix = AUTO_ALL_MODES;
		else
			hw->mdix = cmd->base.eth_tp_mdix_ctrl;
	}

	/* reset the link */

	if (netif_running(adapter->netdev)) {
		e1000_down(adapter);
		e1000_up(adapter);
	} else {
		e1000_reset(adapter);
	}
	clear_bit(__E1000_RESETTING, &adapter->flags);
	return 0;
}

static u32 e1000_get_link(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* If the link is not reported up to netdev, interrupts are disabled,
	 * and so the physical link state may have changed since we last
	 * looked. Set get_link_status to make sure that the true link
	 * state is interrogated, rather than pulling a cached and possibly
	 * stale link state from the driver.
	 */
	if (!netif_carrier_ok(netdev))
		adapter->hw.get_link_status = 1;

	return e1000_has_link(adapter);
}

static void e1000_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	pause->autoneg =
		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);

	if (hw->fc == E1000_FC_RX_PAUSE) {
		pause->rx_pause = 1;
	} else if (hw->fc == E1000_FC_TX_PAUSE) {
		pause->tx_pause = 1;
	} else if (hw->fc == E1000_FC_FULL) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}

static int e1000_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	int retval = 0;

	adapter->fc_autoneg = pause->autoneg;

	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	if (pause->rx_pause && pause->tx_pause)
		hw->fc = E1000_FC_FULL;
	else if (pause->rx_pause && !pause->tx_pause)
		hw->fc = E1000_FC_RX_PAUSE;
	else if (!pause->rx_pause && pause->tx_pause)
		hw->fc = E1000_FC_TX_PAUSE;
	else if (!pause->rx_pause && !pause->tx_pause)
		hw->fc = E1000_FC_NONE;

	hw->original_fc = hw->fc;

	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
		if (netif_running(adapter->netdev)) {
			e1000_down(adapter);
			e1000_up(adapter);
		} else {
			e1000_reset(adapter);
		}
	} else
		retval = ((hw->media_type == e1000_media_type_fiber) ?
			  e1000_setup_link(hw) : e1000_force_mac_fc(hw));

	clear_bit(__E1000_RESETTING, &adapter->flags);
	return retval;
}

static u32 e1000_get_msglevel(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void e1000_set_msglevel(struct net_device *netdev, u32 data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static int e1000_get_regs_len(struct net_device *netdev)
{
#define E1000_REGS_LEN 32
	return E1000_REGS_LEN * sizeof(u32);
}

static void e1000_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
			   void *p)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u16 phy_data;

	memset(p, 0, E1000_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;

	regs_buff[0] = er32(CTRL);
	regs_buff[1] = er32(STATUS);

	regs_buff[2] = er32(RCTL);
	regs_buff[3] = er32(RDLEN);
	regs_buff[4] = er32(RDH);
	regs_buff[5] = er32(RDT);
	regs_buff[6] = er32(RDTR);

	regs_buff[7] = er32(TCTL);
	regs_buff[8] = er32(TDLEN);
	regs_buff[9] = er32(TDH);
	regs_buff[10] = er32(TDT);
	regs_buff[11] = er32(TIDV);

	regs_buff[12] = hw->phy_type;  /* PHY type (IGP=1, M88=0) */
	if (hw->phy_type == e1000_phy_igp) {
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_A);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[13] = (u32)phy_data; /* cable length */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_B);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[14] = (u32)phy_data; /* cable length */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_C);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[15] = (u32)phy_data; /* cable length */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_AGC_D);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[16] = (u32)phy_data; /* cable length */
		regs_buff[17] = 0; /* extended 10bt distance (not needed) */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[18] = (u32)phy_data; /* cable polarity */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
				    IGP01E1000_PHY_PCS_INIT_REG);
		e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
		regs_buff[19] = (u32)phy_data; /* cable polarity */
		regs_buff[20] = 0; /* polarity correction enabled (always) */
		regs_buff[22] = 0; /* phy receive errors (unavailable) */
		regs_buff[23] = regs_buff[18]; /* mdix mode */
		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
	} else {
		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
		regs_buff[13] = (u32)phy_data; /* cable length */
		regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
		regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
		regs_buff[18] = regs_buff[13]; /* cable polarity */
		regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
		regs_buff[20] = regs_buff[17]; /* polarity correction */
		/* phy receive errors */
		regs_buff[22] = adapter->phy_stats.receive_errors;
		regs_buff[23] = regs_buff[13]; /* mdix mode */
	}
	regs_buff[21] = adapter->phy_stats.idle_errors;  /* phy idle errors */
	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
	regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
	if (hw->mac_type >= e1000_82540 &&
	    hw->media_type == e1000_media_type_copper) {
		regs_buff[26] = er32(MANC);
	}
}

static int e1000_get_eeprom_len(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	return hw->eeprom.word_size * 2;
}

static int e1000_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;

	eeprom_buff = kmalloc_array(last_word - first_word + 1, sizeof(u16),
				    GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	if (hw->eeprom.type == e1000_eeprom_spi)
		ret_val = e1000_read_eeprom(hw, first_word,
					    last_word - first_word + 1,
					    eeprom_buff);
	else {
		for (i = 0; i < last_word - first_word + 1; i++) {
			ret_val = e1000_read_eeprom(hw, first_word + i, 1,
						    &eeprom_buff[i]);
			if (ret_val)
				break;
		}
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
	       eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}

static int e1000_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EOPNOTSUPP;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EFAULT;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = (void *)eeprom_buff;

	if (eeprom->offset & 1) {
		/* need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = e1000_read_eeprom(hw, first_word, 1,
					    &eeprom_buff[0]);
		ptr++;
	}
	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
		/* need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = e1000_read_eeprom(hw, last_word, 1,
					    &eeprom_buff[last_word - first_word]);
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);

	ret_val = e1000_write_eeprom(hw, first_word,
				     last_word - first_word + 1, eeprom_buff);

	/* Update the checksum over the first part of the EEPROM if needed */
	if ((ret_val == 0) && (first_word <= EEPROM_CHECKSUM_REG))
		e1000_update_eeprom_checksum(hw);

	kfree(eeprom_buff);
	return ret_val;
}

static void e1000_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, e1000_driver_name,
		sizeof(drvinfo->driver));

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
}

static void e1000_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	e1000_mac_type mac_type = hw->mac_type;
	struct e1000_tx_ring *txdr = adapter->tx_ring;
	struct e1000_rx_ring *rxdr = adapter->rx_ring;

	ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
			       E1000_MAX_82544_RXD;
	ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
			       E1000_MAX_82544_TXD;
	ring->rx_pending = rxdr->count;
	ring->tx_pending = txdr->count;
}

static int e1000_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	e1000_mac_type mac_type = hw->mac_type;
	struct e1000_tx_ring *txdr, *tx_old;
	struct e1000_rx_ring *rxdr, *rx_old;
	int i, err;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
		msleep(1);

	if (netif_running(adapter->netdev))
		e1000_down(adapter);

	tx_old = adapter->tx_ring;
	rx_old = adapter->rx_ring;

	err = -ENOMEM;
	txdr = kcalloc(adapter->num_tx_queues, sizeof(struct e1000_tx_ring),
		       GFP_KERNEL);
	if (!txdr)
		goto err_alloc_tx;

	rxdr = kcalloc(adapter->num_rx_queues, sizeof(struct e1000_rx_ring),
		       GFP_KERNEL);
	if (!rxdr)
		goto err_alloc_rx;

	adapter->tx_ring = txdr;
	adapter->rx_ring = rxdr;

	rxdr->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
	rxdr->count = min(rxdr->count, (u32)(mac_type < e1000_82544 ?
			  E1000_MAX_RXD : E1000_MAX_82544_RXD));
	rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
	txdr->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
	txdr->count = min(txdr->count, (u32)(mac_type < e1000_82544 ?
			  E1000_MAX_TXD : E1000_MAX_82544_TXD));
	txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);

	for (i = 0; i < adapter->num_tx_queues; i++)
		txdr[i].count = txdr->count;
	for (i = 0; i < adapter->num_rx_queues; i++)
		rxdr[i].count = rxdr->count;

	err = 0;
	if (netif_running(adapter->netdev)) {
		/* Try to get new resources before deleting old */
		err = e1000_setup_all_rx_resources(adapter);
		if (err)
			goto err_setup_rx;
		err = e1000_setup_all_tx_resources(adapter);
		if (err)
			goto err_setup_tx;

		/* save the new, restore the old in order to free it,
		 * then restore the new back again
		 */

		adapter->rx_ring = rx_old;
		adapter->tx_ring = tx_old;
		e1000_free_all_rx_resources(adapter);
		e1000_free_all_tx_resources(adapter);
		adapter->rx_ring = rxdr;
		adapter->tx_ring = txdr;
		err = e1000_up(adapter);
	}
	kfree(tx_old);
	kfree(rx_old);

	clear_bit(__E1000_RESETTING, &adapter->flags);
	return err;

err_setup_tx:
	e1000_free_all_rx_resources(adapter);
err_setup_rx:
	adapter->rx_ring = rx_old;
	adapter->tx_ring = tx_old;
	kfree(rxdr);
err_alloc_rx:
	kfree(txdr);
err_alloc_tx:
	if (netif_running(adapter->netdev))
		e1000_up(adapter);
	clear_bit(__E1000_RESETTING, &adapter->flags);
	return err;
}

static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	struct e1000_hw *hw = &adapter->hw;
	static const u32 test[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
	};
	u8 __iomem *address = hw->hw_addr + reg;
	u32 read;
	int i;

	for (i = 0; i < ARRAY_SIZE(test); i++) {
		writel(write & test[i], address);
		read = readl(address);
		if (read != (write & test[i] & mask)) {
			e_err(drv, "pattern test reg %04X failed: "
			      "got 0x%08X expected 0x%08X\n",
			      reg, read, (write & test[i] & mask));
			*data = reg;
			return true;
		}
	}
	return false;
}

static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	struct e1000_hw *hw = &adapter->hw;
	u8 __iomem *address = hw->hw_addr + reg;
	u32 read;

	writel(write & mask, address);
	read = readl(address);
	if ((read & mask) != (write & mask)) {
		e_err(drv, "set/check reg %04X test failed: "
		      "got 0x%08X expected 0x%08X\n",
		      reg, (read & mask), (write & mask));
		*data = reg;
		return true;
	}
	return false;
}

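/* These helpers pick the 82543-and-newer or legacy 82542 offset for a
 * register and make the register test bail out on the first mismatch;
 * the failing register offset is reported back through *data.
 */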
#define REG_PATTERN_TEST(reg, mask, write) \
	do { \
		if (reg_pattern_test(adapter, data, \
				     (hw->mac_type >= e1000_82543) \
				     ? E1000_##reg : E1000_82542_##reg, \
				     mask, write)) \
			return 1; \
	} while (0)

#define REG_SET_AND_CHECK(reg, mask, write) \
	do { \
		if (reg_set_and_check(adapter, data, \
				      (hw->mac_type >= e1000_82543) \
				      ? E1000_##reg : E1000_82542_##reg, \
				      mask, write)) \
			return 1; \
	} while (0)

static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
{
	u32 value, before, after;
	u32 i, toggle;
	struct e1000_hw *hw = &adapter->hw;

	/* The status register is Read Only, so a write should fail.
	 * Some bits that get toggled are ignored.
	 */

	/* there are several bits on newer hardware that are r/w */
	toggle = 0xFFFFF833;

	before = er32(STATUS);
	value = (er32(STATUS) & toggle);
	ew32(STATUS, toggle);
	after = er32(STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: "
		      "0x%08X expected: 0x%08X\n", after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ew32(STATUS, before);

	REG_PATTERN_TEST(FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(FCAH, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(FCT, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(VET, 0x0000FFFF, 0xFFFFFFFF);

	REG_PATTERN_TEST(RDTR, 0x0000FFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(RDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(RDLEN, 0x000FFF80, 0x000FFFFF);
	REG_PATTERN_TEST(RDH, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(RDT, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(FCRTH, 0x0000FFF8, 0x0000FFF8);
	REG_PATTERN_TEST(FCTTV, 0x0000FFFF, 0x0000FFFF);
	REG_PATTERN_TEST(TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
	REG_PATTERN_TEST(TDBAH, 0xFFFFFFFF, 0xFFFFFFFF);
	REG_PATTERN_TEST(TDLEN, 0x000FFF80, 0x000FFFFF);

	REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x00000000);

	before = 0x06DFB3FE;
	REG_SET_AND_CHECK(RCTL, before, 0x003FFFFB);
	REG_SET_AND_CHECK(TCTL, 0xFFFFFFFF, 0x00000000);

	if (hw->mac_type >= e1000_82543) {
		REG_SET_AND_CHECK(RCTL, before, 0xFFFFFFFF);
		REG_PATTERN_TEST(RDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
		REG_PATTERN_TEST(TXCW, 0xC000FFFF, 0x0000FFFF);
		REG_PATTERN_TEST(TDBAL, 0xFFFFFFF0, 0xFFFFFFFF);
		REG_PATTERN_TEST(TIDV, 0x0000FFFF, 0x0000FFFF);
		value = E1000_RAR_ENTRIES;
		for (i = 0; i < value; i++) {
			REG_PATTERN_TEST(RA + (((i << 1) + 1) << 2),
					 0x8003FFFF, 0xFFFFFFFF);
		}
	} else {
		REG_SET_AND_CHECK(RCTL, 0xFFFFFFFF, 0x01FFFFFF);
		REG_PATTERN_TEST(RDBAL, 0xFFFFF000, 0xFFFFFFFF);
		REG_PATTERN_TEST(TXCW, 0x0000FFFF, 0x0000FFFF);
		REG_PATTERN_TEST(TDBAL, 0xFFFFF000, 0xFFFFFFFF);
	}

	value = E1000_MC_TBL_SIZE;
	for (i = 0; i < value; i++)
		REG_PATTERN_TEST(MTA + (i << 2), 0xFFFFFFFF, 0xFFFFFFFF);

	*data = 0;
	return 0;
}

static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 temp;
	u16 checksum = 0;
	u16 i;

	*data = 0;
	/* Read and add up the contents of the EEPROM */
	for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
		if ((e1000_read_eeprom(hw, i, 1, &temp)) < 0) {
			*data = 1;
			break;
		}
		checksum += temp;
	}

	/* If Checksum is not Correct return error else test passed */
	if ((checksum != (u16)EEPROM_SUM) && !(*data))
		*data = 2;

	return *data;
}

static irqreturn_t e1000_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *)data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	adapter->test_icr |= er32(ICR);

	return IRQ_HANDLED;
}

static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0;
	bool shared_int = true;
	u32 irq = adapter->pdev->irq;
	struct e1000_hw *hw = &adapter->hw;

	*data = 0;

	/* NOTE: we don't test MSI interrupts here, yet
	 * Hook up test interrupt handler just for this test
	 */
	if (!request_irq(irq, e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
			 netdev))
		shared_int = false;
	else if (request_irq(irq, e1000_test_intr, IRQF_SHARED,
			     netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", (shared_int ?
	       "shared" : "unshared"));

	/* Disable all the interrupts */
	ew32(IMC, 0xFFFFFFFF);
	E1000_WRITE_FLUSH();
	msleep(10);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = 1 << i;

		if (!shared_int) {
			/* Disable the interrupt to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted. If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			ew32(IMC, mask);
			ew32(ICS, mask);
			E1000_WRITE_FLUSH();
			msleep(10);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/* Enable the interrupt to be reported in
		 * the cause register and then force the same
		 * interrupt and see if one gets posted. If
		 * an interrupt was not posted to the bus, the
		 * test failed.
		 */
		adapter->test_icr = 0;
		ew32(IMS, mask);
		ew32(ICS, mask);
		E1000_WRITE_FLUSH();
		msleep(10);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/* Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted. If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			ew32(IMC, ~mask & 0x00007FFF);
			ew32(ICS, ~mask & 0x00007FFF);
			E1000_WRITE_FLUSH();
			msleep(10);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	ew32(IMC, 0xFFFFFFFF);
	E1000_WRITE_FLUSH();
	msleep(10);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}

static void e1000_free_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i;

	if (txdr->desc && txdr->buffer_info) {
		for (i = 0; i < txdr->count; i++) {
			if (txdr->buffer_info[i].dma)
				dma_unmap_single(&pdev->dev,
						 txdr->buffer_info[i].dma,
						 txdr->buffer_info[i].length,
						 DMA_TO_DEVICE);
			dev_kfree_skb(txdr->buffer_info[i].skb);
		}
	}

	if (rxdr->desc && rxdr->buffer_info) {
		for (i = 0; i < rxdr->count; i++) {
			if (rxdr->buffer_info[i].dma)
				dma_unmap_single(&pdev->dev,
						 rxdr->buffer_info[i].dma,
						 E1000_RXBUFFER_2048,
						 DMA_FROM_DEVICE);
			kfree(rxdr->buffer_info[i].rxbuf.data);
		}
	}

	if (txdr->desc) {
		dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
				  txdr->dma);
		txdr->desc = NULL;
	}
	if (rxdr->desc) {
		dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
				  rxdr->dma);
		rxdr->desc = NULL;
	}

	kfree(txdr->buffer_info);
	txdr->buffer_info = NULL;
	kfree(rxdr->buffer_info);
	rxdr->buffer_info = NULL;
}

static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	u32 rctl;
	int i, ret_val;

	/* Setup Tx descriptor ring and Tx buffers */

	if (!txdr->count)
		txdr->count = E1000_DEFAULT_TXD;

	txdr->buffer_info = kcalloc(txdr->count, sizeof(struct e1000_tx_buffer),
				    GFP_KERNEL);
	if (!txdr->buffer_info) {
		ret_val = 1;
		goto err_nomem;
	}

	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
	txdr->size = ALIGN(txdr->size, 4096);
	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
					GFP_KERNEL);
	if (!txdr->desc) {
		ret_val = 2;
		goto err_nomem;
	}
	txdr->next_to_use = txdr->next_to_clean = 0;

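	/* Point the hardware at the test Tx ring and enable the
	 * transmitter with full-duplex collision settings.
	 */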
	ew32(TDBAL, ((u64)txdr->dma & 0x00000000FFFFFFFF));
	ew32(TDBAH, ((u64)txdr->dma >> 32));
	ew32(TDLEN, txdr->count * sizeof(struct e1000_tx_desc));
	ew32(TDH, 0);
	ew32(TDT, 0);
	ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN |
	     E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
	     E1000_FDX_COLLISION_DISTANCE << E1000_COLD_SHIFT);

	for (i = 0; i < txdr->count; i++) {
		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*txdr, i);
		struct sk_buff *skb;
		unsigned int size = 1024;

		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret_val = 3;
			goto err_nomem;
		}
		skb_put(skb, size);
		txdr->buffer_info[i].skb = skb;
		txdr->buffer_info[i].length = skb->len;
		txdr->buffer_info[i].dma =
			dma_map_single(&pdev->dev, skb->data, skb->len,
				       DMA_TO_DEVICE);
		if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) {
			ret_val = 4;
			goto err_nomem;
		}
		tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
		tx_desc->lower.data = cpu_to_le32(skb->len);
		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
						   E1000_TXD_CMD_IFCS |
						   E1000_TXD_CMD_RPS);
		tx_desc->upper.data = 0;
	}

	/* Setup Rx descriptor ring and Rx buffers */

	if (!rxdr->count)
		rxdr->count = E1000_DEFAULT_RXD;

	rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_rx_buffer),
				    GFP_KERNEL);
	if (!rxdr->buffer_info) {
		ret_val = 5;
		goto err_nomem;
	}

	rxdr->size = rxdr->count * sizeof(struct e1000_rx_desc);
	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
					GFP_KERNEL);
	if (!rxdr->desc) {
		ret_val = 6;
		goto err_nomem;
	}
	rxdr->next_to_use = rxdr->next_to_clean = 0;

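	/* Program the test Rx ring, then re-enable receives with
	 * 2048-byte buffers and broadcast accept for the loopback test.
	 */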
1058*4882a593Smuzhiyun rctl = er32(RCTL);
1059*4882a593Smuzhiyun ew32(RCTL, rctl & ~E1000_RCTL_EN);
1060*4882a593Smuzhiyun ew32(RDBAL, ((u64)rxdr->dma & 0xFFFFFFFF));
1061*4882a593Smuzhiyun ew32(RDBAH, ((u64)rxdr->dma >> 32));
1062*4882a593Smuzhiyun ew32(RDLEN, rxdr->size);
1063*4882a593Smuzhiyun ew32(RDH, 0);
1064*4882a593Smuzhiyun ew32(RDT, 0);
1065*4882a593Smuzhiyun rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
1066*4882a593Smuzhiyun E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
1067*4882a593Smuzhiyun (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1068*4882a593Smuzhiyun ew32(RCTL, rctl);
1069*4882a593Smuzhiyun
1070*4882a593Smuzhiyun for (i = 0; i < rxdr->count; i++) {
1071*4882a593Smuzhiyun struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
1072*4882a593Smuzhiyun u8 *buf;
1073*4882a593Smuzhiyun
1074*4882a593Smuzhiyun buf = kzalloc(E1000_RXBUFFER_2048 + NET_SKB_PAD + NET_IP_ALIGN,
1075*4882a593Smuzhiyun GFP_KERNEL);
1076*4882a593Smuzhiyun if (!buf) {
1077*4882a593Smuzhiyun ret_val = 7;
1078*4882a593Smuzhiyun goto err_nomem;
1079*4882a593Smuzhiyun }
1080*4882a593Smuzhiyun rxdr->buffer_info[i].rxbuf.data = buf;
1081*4882a593Smuzhiyun
1082*4882a593Smuzhiyun rxdr->buffer_info[i].dma =
1083*4882a593Smuzhiyun dma_map_single(&pdev->dev,
1084*4882a593Smuzhiyun buf + NET_SKB_PAD + NET_IP_ALIGN,
1085*4882a593Smuzhiyun E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
1086*4882a593Smuzhiyun if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
1087*4882a593Smuzhiyun ret_val = 8;
1088*4882a593Smuzhiyun goto err_nomem;
1089*4882a593Smuzhiyun }
1090*4882a593Smuzhiyun rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
1091*4882a593Smuzhiyun }
1092*4882a593Smuzhiyun
1093*4882a593Smuzhiyun return 0;
1094*4882a593Smuzhiyun
1095*4882a593Smuzhiyun err_nomem:
1096*4882a593Smuzhiyun e1000_free_desc_rings(adapter);
1097*4882a593Smuzhiyun return ret_val;
1098*4882a593Smuzhiyun }
1099*4882a593Smuzhiyun
e1000_phy_disable_receiver(struct e1000_adapter * adapter)1100*4882a593Smuzhiyun static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
1101*4882a593Smuzhiyun {
1102*4882a593Smuzhiyun struct e1000_hw *hw = &adapter->hw;
1103*4882a593Smuzhiyun
1104*4882a593Smuzhiyun /* Write out to PHY registers 29 and 30 to disable the Receiver. */
1105*4882a593Smuzhiyun e1000_write_phy_reg(hw, 29, 0x001F);
1106*4882a593Smuzhiyun e1000_write_phy_reg(hw, 30, 0x8FFC);
1107*4882a593Smuzhiyun e1000_write_phy_reg(hw, 29, 0x001A);
1108*4882a593Smuzhiyun e1000_write_phy_reg(hw, 30, 0x8FF0);
1109*4882a593Smuzhiyun }
1110*4882a593Smuzhiyun
static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_reg;

	/* Because we reset the PHY above, we need to re-force TX_CLK in the
	 * Extended PHY Specific Control Register to 25MHz clock. This
	 * value defaults back to a 2.5MHz clock when the PHY is reset.
	 */
	e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
	phy_reg |= M88E1000_EPSCR_TX_CLK_25;
	e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);

	/* In addition, because of the s/w reset above, we need to enable
	 * CRS on TX. This must be set for both full and half duplex
	 * operation.
	 */
	e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
	phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
	e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
}

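/* Put the non-integrated (82543) M88 PHY into forced 1000/full loopback.
 * The MAC is forced to match, auto-crossover is disabled and the PHY
 * receiver is shut off; the final read-backs verify that the configuration
 * actually took effect (error codes 9-11 identify which check failed).
 */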
static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_reg;
	u16 phy_reg;

	/* Setup the Device Control Register for PHY loopback test. */

	ctrl_reg = er32(CTRL);
	ctrl_reg |= (E1000_CTRL_ILOS |		/* Invert Loss-Of-Signal */
		     E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX |	/* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
		     E1000_CTRL_FD);		/* Force Duplex to FULL */

	ew32(CTRL, ctrl_reg);

	/* Read the PHY Specific Control Register (0x10) */
	e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);

	/* Clear Auto-Crossover bits in PHY Specific Control Register
	 * (bits 6:5).
	 */
	phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
	e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_reg);

	/* Perform software reset on the PHY */
	e1000_phy_reset(hw);

	/* Have to setup TX_CLK and TX_CRS after software reset */
	e1000_phy_reset_clk_and_crs(adapter);

	e1000_write_phy_reg(hw, PHY_CTRL, 0x8100);

	/* Wait for reset to complete. */
	udelay(500);

	/* Have to setup TX_CLK and TX_CRS after software reset */
	e1000_phy_reset_clk_and_crs(adapter);

	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
	e1000_phy_disable_receiver(adapter);

	/* Set the loopback bit in the PHY control register. */
	e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
	phy_reg |= MII_CR_LOOPBACK;
	e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);

	/* Setup TX_CLK and TX_CRS one more time. */
	e1000_phy_reset_clk_and_crs(adapter);

	/* Check Phy Configuration */
	e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
	if (phy_reg != 0x4100)
		return 9;

	e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
	if (phy_reg != 0x0070)
		return 10;

	e1000_read_phy_reg(hw, 29, &phy_reg);
	if (phy_reg != 0x001A)
		return 11;

	return 0;
}

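/* Put an integrated PHY into forced 1000/full loopback and force the MAC
 * to the same speed/duplex so the two stay in agreement during the test;
 * fiber parts additionally get ILOS/SLU set when no full-duplex link is
 * present.
 */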
static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 ctrl_reg = 0;
	u32 stat_reg = 0;

	hw->autoneg = false;

	if (hw->phy_type == e1000_phy_m88) {
		/* Auto-MDI/MDIX Off */
		e1000_write_phy_reg(hw,
				    M88E1000_PHY_SPEC_CTRL, 0x0808);
		/* reset to update Auto-MDI/MDIX */
		e1000_write_phy_reg(hw, PHY_CTRL, 0x9140);
		/* autoneg off */
		e1000_write_phy_reg(hw, PHY_CTRL, 0x8140);
	}

	ctrl_reg = er32(CTRL);

	/* force 1000, set loopback */
	e1000_write_phy_reg(hw, PHY_CTRL, 0x4140);

	/* Now set up the MAC to the same speed/duplex as the PHY. */
	ctrl_reg = er32(CTRL);
	ctrl_reg &= ~E1000_CTRL_SPD_SEL;	/* Clear the speed sel bits */
	ctrl_reg |= (E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
		     E1000_CTRL_FRCDPX |	/* Set the Force Duplex Bit */
		     E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
		     E1000_CTRL_FD);		/* Force Duplex to FULL */

	if (hw->media_type == e1000_media_type_copper &&
	    hw->phy_type == e1000_phy_m88)
		ctrl_reg |= E1000_CTRL_ILOS;	/* Invert Loss of Signal */
	else {
		/* Set the ILOS bit on the fiber NIC if a half-duplex
		 * link is detected.
		 */
		stat_reg = er32(STATUS);
		if ((stat_reg & E1000_STATUS_FD) == 0)
			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
	}

	ew32(CTRL, ctrl_reg);

	/* Disable the receiver on the PHY so that it does not begin to
	 * autonegotiate when a cable is reconnected to the NIC.
	 */
	if (hw->phy_type == e1000_phy_m88)
		e1000_phy_disable_receiver(adapter);

	udelay(500);

	return 0;
}

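/* Select the PHY loopback setup routine for this MAC type. The 82543
 * (non-integrated PHY) retries its setup because some of its PHY registers
 * get corrupted at random; the listed newer parts use the integrated-PHY
 * path, and anything else simply sets the MII loopback bit.
 */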
static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_reg = 0;
	u16 count = 0;

	switch (hw->mac_type) {
	case e1000_82543:
		if (hw->media_type == e1000_media_type_copper) {
			/* Attempt to setup Loopback mode on Non-integrated PHY.
			 * Some PHY registers get corrupted at random, so
			 * attempt this 10 times.
			 */
			while (e1000_nonintegrated_phy_loopback(adapter) &&
			       count++ < 10);
			if (count < 11)
				return 0;
		}
		break;

	case e1000_82544:
	case e1000_82540:
	case e1000_82545:
	case e1000_82545_rev_3:
	case e1000_82546:
	case e1000_82546_rev_3:
	case e1000_82541:
	case e1000_82541_rev_2:
	case e1000_82547:
	case e1000_82547_rev_2:
		return e1000_integrated_phy_loopback(adapter);
	default:
		/* By default, perform PHY loopback by reading the MII
		 * control register and asserting bit 14 (loopback mode).
		 */
		e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
		phy_reg |= MII_CR_LOOPBACK;
		e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
		return 0;
	}

	return 8;
}

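/* Configure loopback for the self-test. On fiber/serdes parts the
 * 82545/82546 family still loops back at the PHY while the rest loop back
 * at the transceiver via RCTL; copper parts always use PHY loopback.
 */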
static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;

	if (hw->media_type == e1000_media_type_fiber ||
	    hw->media_type == e1000_media_type_internal_serdes) {
		switch (hw->mac_type) {
		case e1000_82545:
		case e1000_82546:
		case e1000_82545_rev_3:
		case e1000_82546_rev_3:
			return e1000_set_phy_loopback(adapter);
		default:
			rctl = er32(RCTL);
			rctl |= E1000_RCTL_LBM_TCVR;
			ew32(RCTL, rctl);
			return 0;
		}
	} else if (hw->media_type == e1000_media_type_copper) {
		return e1000_set_phy_loopback(adapter);
	}

	return 7;
}

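/* Undo the loopback configuration: clear the RCTL loopback bits, re-enable
 * autoneg, and take the PHY out of loopback (followed by a PHY reset) if
 * the loopback bit was set.
 */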
static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl;
	u16 phy_reg;

	rctl = er32(RCTL);
	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
	ew32(RCTL, rctl);

	switch (hw->mac_type) {
	case e1000_82545:
	case e1000_82546:
	case e1000_82545_rev_3:
	case e1000_82546_rev_3:
	default:
		hw->autoneg = true;
		e1000_read_phy_reg(hw, PHY_CTRL, &phy_reg);
		if (phy_reg & MII_CR_LOOPBACK) {
			phy_reg &= ~MII_CR_LOOPBACK;
			e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
			e1000_phy_reset(hw);
		}
		break;
	}
}

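/* Fill a test frame with a recognizable pattern: 0xFF in the first half,
 * 0xAA in the second half, plus 0xBE/0xAF marker bytes at fixed offsets
 * that e1000_check_lbtest_frame() looks for on receive.
 */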
static void e1000_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size &= ~1;
	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
	skb->data[frame_size / 2 + 10] = 0xBE;
	skb->data[frame_size / 2 + 12] = 0xAF;
}

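/* Verify a received test frame: the leading 0xFF fill and the 0xBE/0xAF
 * markers written by e1000_create_lbtest_frame() must all be present.
 * Returns 0 on a match, 13 (the mis-compare error code) otherwise.
 */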
static int e1000_check_lbtest_frame(const unsigned char *data,
				    unsigned int frame_size)
{
	frame_size &= ~1;
	if (*(data + 3) == 0xFF) {
		if ((*(data + frame_size / 2 + 10) == 0xBE) &&
		    (*(data + frame_size / 2 + 12) == 0xAF)) {
			return 0;
		}
	}
	return 13;
}

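/* Drive the loopback data path: queue batches of 64 test frames on the
 * test TX ring, then poll the test RX ring and verify each frame until
 * every frame of the batch has been seen or the receive window expires.
 */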
static int e1000_run_loopback_test(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
	struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
	struct pci_dev *pdev = adapter->pdev;
	int i, j, k, l, lc, good_cnt, ret_val = 0;
	unsigned long time;

	ew32(RDT, rxdr->count - 1);

	/* Calculate the loop count based on the largest descriptor ring.
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop.
	 */

	if (rxdr->count <= txdr->count)
		lc = ((txdr->count / 64) * 2) + 1;
	else
		lc = ((rxdr->count / 64) * 2) + 1;

	k = l = 0;
	for (j = 0; j <= lc; j++) {	/* loop count loop */
		for (i = 0; i < 64; i++) {	/* send the packets */
			e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
						  1024);
			dma_sync_single_for_device(&pdev->dev,
						   txdr->buffer_info[k].dma,
						   txdr->buffer_info[k].length,
						   DMA_TO_DEVICE);
			if (unlikely(++k == txdr->count))
				k = 0;
		}
		ew32(TDT, k);
		E1000_WRITE_FLUSH();
		msleep(200);
		time = jiffies; /* set the start time for the receive */
		good_cnt = 0;
		do { /* receive the sent packets */
			dma_sync_single_for_cpu(&pdev->dev,
						rxdr->buffer_info[l].dma,
						E1000_RXBUFFER_2048,
						DMA_FROM_DEVICE);

			ret_val = e1000_check_lbtest_frame(
					rxdr->buffer_info[l].rxbuf.data +
					NET_SKB_PAD + NET_IP_ALIGN,
					1024);
			if (!ret_val)
				good_cnt++;
			if (unlikely(++l == rxdr->count))
				l = 0;
			/* time + 20 msecs (200 msecs on 2.4) is more than
			 * enough time to complete the receives; if it is
			 * exceeded, break out and report an error.
			 */
		} while (good_cnt < 64 && time_after(time + 20, jiffies));

		if (good_cnt != 64) {
			ret_val = 13; /* ret_val is the same as mis-compare */
			break;
		}
		if (time_after_eq(jiffies, time + 2)) {
			ret_val = 14; /* error code for time out error */
			break;
		}
	} /* end loop count loop */
	return ret_val;
}

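/* Top-level loopback test: set up the test descriptor rings, put the
 * hardware into loopback, run the data-path test and clean up afterwards.
 * A non-zero *data value identifies the step that failed.
 */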
static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
{
	*data = e1000_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = e1000_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = e1000_run_loopback_test(adapter);
	e1000_loopback_cleanup(adapter);

err_loopback:
	e1000_free_desc_rings(adapter);
out:
	return *data;
}

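/* Link test: for internal serdes, poll for link for an extended period
 * (blade backplanes can be slow to establish link); otherwise check the
 * STATUS register, giving autonegotiation a few seconds to complete first.
 */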
static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
{
	struct e1000_hw *hw = &adapter->hw;
	*data = 0;
	if (hw->media_type == e1000_media_type_internal_serdes) {
		int i = 0;

		hw->serdes_has_link = false;

		/* On some blade server designs, link establishment
		 * could take as long as 2-3 minutes
		 */
		do {
			e1000_check_for_link(hw);
			if (hw->serdes_has_link)
				return *data;
			msleep(20);
		} while (i++ < 3750);

		*data = 1;
	} else {
		e1000_check_for_link(hw);
		if (hw->autoneg)	/* if auto_neg is set wait for it */
			msleep(4000);

		if (!(er32(STATUS) & E1000_STATUS_LU))
			*data = 1;
	}
	return *data;
}

static int e1000_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return E1000_TEST_LEN;
	case ETH_SS_STATS:
		return E1000_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

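/* ethtool self-test entry point (e.g. "ethtool -t ethX offline" or
 * "ethtool -t ethX online"). Offline mode takes the interface down, runs
 * the link, register, EEPROM, interrupt and loopback tests, and restores
 * the saved speed/duplex/autoneg settings; online mode only runs the link
 * test and reports the remaining tests as passed.
 */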
static void e1000_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	bool if_running = netif_running(netdev);

	set_bit(__E1000_TESTING, &adapter->flags);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		/* save speed, duplex, autoneg settings */
		u16 autoneg_advertised = hw->autoneg_advertised;
		u8 forced_speed_duplex = hw->forced_speed_duplex;
		u8 autoneg = hw->autoneg;

		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			e1000_close(netdev);
		else
			e1000_reset(adapter);

		if (e1000_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000_reset(adapter);
		if (e1000_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000_reset(adapter);
		if (e1000_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		e1000_reset(adapter);
		/* make sure the phy is powered up */
		e1000_power_up_phy(adapter);
		if (e1000_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* restore speed, duplex, autoneg settings */
		hw->autoneg_advertised = autoneg_advertised;
		hw->forced_speed_duplex = forced_speed_duplex;
		hw->autoneg = autoneg;

		e1000_reset(adapter);
		clear_bit(__E1000_TESTING, &adapter->flags);
		if (if_running)
			e1000_open(netdev);
	} else {
		e_info(hw, "online testing starting\n");
		/* Online tests */
		if (e1000_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__E1000_TESTING, &adapter->flags);
	}
	msleep_interruptible(4 * 1000);
}

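/* Determine whether this device/port combination supports Wake-on-LAN at
 * all. Clears wol->supported and returns 1 for excluded devices or ports;
 * returns 0 for ports that may advertise WoL.
 */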
static int e1000_wol_exclusion(struct e1000_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct e1000_hw *hw = &adapter->hw;
	int retval = 1; /* fail by default */

	switch (hw->device_id) {
	case E1000_DEV_ID_82542:
	case E1000_DEV_ID_82543GC_FIBER:
	case E1000_DEV_ID_82543GC_COPPER:
	case E1000_DEV_ID_82544EI_FIBER:
	case E1000_DEV_ID_82546EB_QUAD_COPPER:
	case E1000_DEV_ID_82545EM_FIBER:
	case E1000_DEV_ID_82545EM_COPPER:
	case E1000_DEV_ID_82546GB_QUAD_COPPER:
	case E1000_DEV_ID_82546GB_PCIE:
		/* these don't support WoL at all */
		wol->supported = 0;
		break;
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
		/* Wake events not supported on port B */
		if (er32(STATUS) & E1000_STATUS_FUNC_1) {
			wol->supported = 0;
			break;
		}
		/* return success for non-excluded adapter ports */
		retval = 0;
		break;
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* quad port adapters only support WoL on port A */
		if (!adapter->quad_port_a) {
			wol->supported = 0;
			break;
		}
		/* return success for non-excluded adapter ports */
		retval = 0;
		break;
	default:
		/* Dual port cards only support WoL on port A from now on,
		 * unless it was enabled in the EEPROM for port B, so
		 * exclude FUNC_1 ports from having WoL enabled.
		 */
		if (er32(STATUS) & E1000_STATUS_FUNC_1 &&
		    !adapter->eeprom_wol) {
			wol->supported = 0;
			break;
		}

		retval = 0;
	}

	return retval;
}

static void e1000_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	/* this function will set ->supported = 0 and return 1 if wol is not
	 * supported by this hardware
	 */
	if (e1000_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	/* apply any specific unsupported masks here */
	switch (hw->device_id) {
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		/* KSP3 does not support UCAST wake-ups */
		wol->supported &= ~WAKE_UCAST;

		if (adapter->wol & E1000_WUFC_EX)
			e_err(drv, "Interface does not support directed "
			      "(unicast) frame wake-up packets\n");
		break;
	default:
		break;
	}

	if (adapter->wol & E1000_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & E1000_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & E1000_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & E1000_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}

static int e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (e1000_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	switch (hw->device_id) {
	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		if (wol->wolopts & WAKE_UCAST) {
			e_err(drv, "Interface does not support directed "
			      "(unicast) frame wake-up packets\n");
			return -EOPNOTSUPP;
		}
		break;
	default:
		break;
	}

	/* these settings will always override what we currently have */
	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= E1000_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= E1000_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= E1000_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= E1000_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

static int e1000_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		e1000_setup_led(hw);
		return 2;

	case ETHTOOL_ID_ON:
		e1000_led_on(hw);
		break;

	case ETHTOOL_ID_OFF:
		e1000_led_off(hw);
		break;

	case ETHTOOL_ID_INACTIVE:
		e1000_cleanup_led(hw);
	}

	return 0;
}

static int e1000_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (adapter->hw.mac_type < e1000_82545)
		return -EOPNOTSUPP;

	if (adapter->itr_setting <= 4)
		ec->rx_coalesce_usecs = adapter->itr_setting;
	else
		ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;

	return 0;
}

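/* Set the RX interrupt throttling rate (e.g. "ethtool -C ethX rx-usecs N").
 * Small values (0-4) select the driver's special ITR modes; anything else
 * is taken as the interval between interrupts in microseconds and stored
 * as an interrupts-per-second rate (1000000 / usecs). The ITR register
 * counts in 256 ns units, hence the 1000000000 / (itr * 256) write below.
 */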
static int e1000_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

	if (hw->mac_type < e1000_82545)
		return -EOPNOTSUPP;

	if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
	    ((ec->rx_coalesce_usecs > 4) &&
	     (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
	    (ec->rx_coalesce_usecs == 2))
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 4) {
		adapter->itr = adapter->itr_setting = 4;
	} else if (ec->rx_coalesce_usecs <= 3) {
		adapter->itr = 20000;
		adapter->itr_setting = ec->rx_coalesce_usecs;
	} else {
		adapter->itr = (1000000 / ec->rx_coalesce_usecs);
		adapter->itr_setting = adapter->itr & ~3;
	}

	if (adapter->itr_setting != 0)
		ew32(ITR, 1000000000 / (adapter->itr * 256));
	else
		ew32(ITR, 0);

	return 0;
}

static int e1000_nway_reset(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		e1000_reinit_locked(adapter);
	return 0;
}

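/* Copy the statistics named in e1000_gstrings_stats into the ethtool data
 * array, reading each value from either the netdev or the adapter
 * structure according to its declared type and widening 32-bit counters
 * to the u64 slots ethtool expects.
 */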
static void e1000_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int i;
	const struct e1000_stats *stat = e1000_gstrings_stats;

	e1000_update_stats(adapter);
	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
		char *p;

		switch (stat->type) {
		case NETDEV_STATS:
			p = (char *)netdev + stat->stat_offset;
			break;
		case E1000_STATS:
			p = (char *)adapter + stat->stat_offset;
			break;
		default:
			netdev_WARN_ONCE(netdev, "Invalid E1000 stat type: %u index %d\n",
					 stat->type, i);
			continue;
		}

		if (stat->sizeof_stat == sizeof(u64))
			data[i] = *(u64 *)p;
		else
			data[i] = *(u32 *)p;
	}
/* BUG_ON(i != E1000_STATS_LEN); */
}

static void e1000_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, e1000_gstrings_test, sizeof(e1000_gstrings_test));
		break;
	case ETH_SS_STATS:
		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
			memcpy(p, e1000_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}

static const struct ethtool_ops e1000_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_drvinfo		= e1000_get_drvinfo,
	.get_regs_len		= e1000_get_regs_len,
	.get_regs		= e1000_get_regs,
	.get_wol		= e1000_get_wol,
	.set_wol		= e1000_set_wol,
	.get_msglevel		= e1000_get_msglevel,
	.set_msglevel		= e1000_set_msglevel,
	.nway_reset		= e1000_nway_reset,
	.get_link		= e1000_get_link,
	.get_eeprom_len		= e1000_get_eeprom_len,
	.get_eeprom		= e1000_get_eeprom,
	.set_eeprom		= e1000_set_eeprom,
	.get_ringparam		= e1000_get_ringparam,
	.set_ringparam		= e1000_set_ringparam,
	.get_pauseparam		= e1000_get_pauseparam,
	.set_pauseparam		= e1000_set_pauseparam,
	.self_test		= e1000_diag_test,
	.get_strings		= e1000_get_strings,
	.set_phys_id		= e1000_set_phys_id,
	.get_ethtool_stats	= e1000_get_ethtool_stats,
	.get_sset_count		= e1000_get_sset_count,
	.get_coalesce		= e1000_get_coalesce,
	.set_coalesce		= e1000_set_coalesce,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings	= e1000_get_link_ksettings,
	.set_link_ksettings	= e1000_set_link_ksettings,
};

void e1000_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &e1000_ethtool_ops;
}