1*4882a593Smuzhiyun /*****************************************************************************
2*4882a593Smuzhiyun * *
3*4882a593Smuzhiyun * File: cxgb2.c *
4*4882a593Smuzhiyun * $Revision: 1.25 $ *
5*4882a593Smuzhiyun * $Date: 2005/06/22 00:43:25 $ *
6*4882a593Smuzhiyun * Description: *
7*4882a593Smuzhiyun * Chelsio 10Gb Ethernet Driver. *
8*4882a593Smuzhiyun * *
9*4882a593Smuzhiyun * This program is free software; you can redistribute it and/or modify *
10*4882a593Smuzhiyun * it under the terms of the GNU General Public License, version 2, as *
11*4882a593Smuzhiyun * published by the Free Software Foundation. *
12*4882a593Smuzhiyun * *
13*4882a593Smuzhiyun * You should have received a copy of the GNU General Public License along *
14*4882a593Smuzhiyun * with this program; if not, see <http://www.gnu.org/licenses/>. *
15*4882a593Smuzhiyun * *
16*4882a593Smuzhiyun * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
17*4882a593Smuzhiyun * WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
18*4882a593Smuzhiyun * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
19*4882a593Smuzhiyun * *
20*4882a593Smuzhiyun * http://www.chelsio.com *
21*4882a593Smuzhiyun * *
22*4882a593Smuzhiyun * Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
23*4882a593Smuzhiyun * All rights reserved. *
24*4882a593Smuzhiyun * *
25*4882a593Smuzhiyun * Maintainers: maintainers@chelsio.com *
26*4882a593Smuzhiyun * *
27*4882a593Smuzhiyun * Authors: Dimitrios Michailidis <dm@chelsio.com> *
28*4882a593Smuzhiyun * Tina Yang <tainay@chelsio.com> *
29*4882a593Smuzhiyun * Felix Marti <felix@chelsio.com> *
30*4882a593Smuzhiyun * Scott Bardone <sbardone@chelsio.com> *
31*4882a593Smuzhiyun * Kurt Ottaway <kottaway@chelsio.com> *
32*4882a593Smuzhiyun * Frank DiMambro <frank@chelsio.com> *
33*4882a593Smuzhiyun * *
34*4882a593Smuzhiyun * History: *
35*4882a593Smuzhiyun * *
36*4882a593Smuzhiyun ****************************************************************************/
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun #include "common.h"
39*4882a593Smuzhiyun #include <linux/module.h>
40*4882a593Smuzhiyun #include <linux/pci.h>
41*4882a593Smuzhiyun #include <linux/netdevice.h>
42*4882a593Smuzhiyun #include <linux/etherdevice.h>
43*4882a593Smuzhiyun #include <linux/if_vlan.h>
44*4882a593Smuzhiyun #include <linux/mii.h>
45*4882a593Smuzhiyun #include <linux/sockios.h>
46*4882a593Smuzhiyun #include <linux/dma-mapping.h>
47*4882a593Smuzhiyun #include <linux/uaccess.h>
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun #include "cpl5_cmd.h"
50*4882a593Smuzhiyun #include "regs.h"
51*4882a593Smuzhiyun #include "gmac.h"
52*4882a593Smuzhiyun #include "cphy.h"
53*4882a593Smuzhiyun #include "sge.h"
54*4882a593Smuzhiyun #include "tp.h"
55*4882a593Smuzhiyun #include "espi.h"
56*4882a593Smuzhiyun #include "elmer0.h"
57*4882a593Smuzhiyun
58*4882a593Smuzhiyun #include <linux/workqueue.h>
59*4882a593Smuzhiyun
/* Queue the periodic MAC statistics accumulation task to run @secs seconds from now. */
static inline void schedule_mac_stats_update(struct adapter *ap, int secs)
{
	schedule_delayed_work(&ap->stats_update_task, secs * HZ);
}
64*4882a593Smuzhiyun
/*
 * Cancel a pending stats update.  Note cancel_delayed_work() does not wait
 * for an already-running work item; callers must synchronize separately
 * (see cxgb_close()).
 */
static inline void cancel_mac_stats_update(struct adapter *ap)
{
	cancel_delayed_work(&ap->stats_update_task);
}
69*4882a593Smuzhiyun
/* Upper/lower bounds used to validate ethtool ring-parameter requests. */
#define MAX_CMDQ_ENTRIES 16384
#define MAX_CMDQ1_ENTRIES 1024
#define MAX_RX_BUFFERS 16384
#define MAX_RX_JUMBO_BUFFERS 16384
#define MAX_TX_BUFFERS_HIGH	16384U
#define MAX_TX_BUFFERS_LOW	1536U
#define MAX_TX_BUFFERS		1460U
#define MIN_FL_ENTRIES 32

#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)

/*
 * The EEPROM is actually bigger but only the first few bytes are used so we
 * only report those.
 */
#define EEPROM_SIZE 32

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("GPL");

/* Default netif message bitmap; overridable at module load time. */
static int dflt_msg_enable = DFLT_MSG_ENABLE;

module_param(dflt_msg_enable, int, 0);
MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T1 default message enable bitmap");

/* Clock selection values passed to t1_clock(). */
#define HCLOCK 0x0
#define LCLOCK 0x1

/* T1 cards powersave mode */
static int t1_clock(struct adapter *adapter, int mode);
static int t1powersave = 1;	/* HW default is powersave mode. */

module_param(t1powersave, int, 0);
MODULE_PARM_DESC(t1powersave, "Enable/Disable T1 powersaving mode");

static int disable_msi = 0;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
112*4882a593Smuzhiyun /*
113*4882a593Smuzhiyun * Setup MAC to receive the types of packets we want.
114*4882a593Smuzhiyun */
t1_set_rxmode(struct net_device * dev)115*4882a593Smuzhiyun static void t1_set_rxmode(struct net_device *dev)
116*4882a593Smuzhiyun {
117*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
118*4882a593Smuzhiyun struct cmac *mac = adapter->port[dev->if_port].mac;
119*4882a593Smuzhiyun struct t1_rx_mode rm;
120*4882a593Smuzhiyun
121*4882a593Smuzhiyun rm.dev = dev;
122*4882a593Smuzhiyun mac->ops->set_rx_mode(mac, &rm);
123*4882a593Smuzhiyun }
124*4882a593Smuzhiyun
link_report(struct port_info * p)125*4882a593Smuzhiyun static void link_report(struct port_info *p)
126*4882a593Smuzhiyun {
127*4882a593Smuzhiyun if (!netif_carrier_ok(p->dev))
128*4882a593Smuzhiyun netdev_info(p->dev, "link down\n");
129*4882a593Smuzhiyun else {
130*4882a593Smuzhiyun const char *s = "10Mbps";
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun switch (p->link_config.speed) {
133*4882a593Smuzhiyun case SPEED_10000: s = "10Gbps"; break;
134*4882a593Smuzhiyun case SPEED_1000: s = "1000Mbps"; break;
135*4882a593Smuzhiyun case SPEED_100: s = "100Mbps"; break;
136*4882a593Smuzhiyun }
137*4882a593Smuzhiyun
138*4882a593Smuzhiyun netdev_info(p->dev, "link up, %s, %s-duplex\n",
139*4882a593Smuzhiyun s, p->link_config.duplex == DUPLEX_FULL
140*4882a593Smuzhiyun ? "full" : "half");
141*4882a593Smuzhiyun }
142*4882a593Smuzhiyun }
143*4882a593Smuzhiyun
t1_link_negotiated(struct adapter * adapter,int port_id,int link_stat,int speed,int duplex,int pause)144*4882a593Smuzhiyun void t1_link_negotiated(struct adapter *adapter, int port_id, int link_stat,
145*4882a593Smuzhiyun int speed, int duplex, int pause)
146*4882a593Smuzhiyun {
147*4882a593Smuzhiyun struct port_info *p = &adapter->port[port_id];
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun if (link_stat != netif_carrier_ok(p->dev)) {
150*4882a593Smuzhiyun if (link_stat)
151*4882a593Smuzhiyun netif_carrier_on(p->dev);
152*4882a593Smuzhiyun else
153*4882a593Smuzhiyun netif_carrier_off(p->dev);
154*4882a593Smuzhiyun link_report(p);
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun /* multi-ports: inform toe */
157*4882a593Smuzhiyun if ((speed > 0) && (adapter->params.nports > 1)) {
158*4882a593Smuzhiyun unsigned int sched_speed = 10;
159*4882a593Smuzhiyun switch (speed) {
160*4882a593Smuzhiyun case SPEED_1000:
161*4882a593Smuzhiyun sched_speed = 1000;
162*4882a593Smuzhiyun break;
163*4882a593Smuzhiyun case SPEED_100:
164*4882a593Smuzhiyun sched_speed = 100;
165*4882a593Smuzhiyun break;
166*4882a593Smuzhiyun case SPEED_10:
167*4882a593Smuzhiyun sched_speed = 10;
168*4882a593Smuzhiyun break;
169*4882a593Smuzhiyun }
170*4882a593Smuzhiyun t1_sched_update_parms(adapter->sge, port_id, 0, sched_speed);
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun }
173*4882a593Smuzhiyun }
174*4882a593Smuzhiyun
/*
 * Bring up a port's link: reset the MAC, reprogram its station address,
 * restore the receive mode, start PHY link negotiation, then enable the
 * MAC in both directions.
 */
static void link_start(struct port_info *p)
{
	struct cmac *mac = p->mac;

	mac->ops->reset(mac);
	/* Not every MAC implementation provides an address-set hook. */
	if (mac->ops->macaddress_set)
		mac->ops->macaddress_set(mac, p->dev->dev_addr);
	t1_set_rxmode(p->dev);
	t1_link_start(p->phy, mac, &p->link_config);
	mac->ops->enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
}
186*4882a593Smuzhiyun
/*
 * Turn on TP checksum offload.  IP header checksum offload is enabled only
 * when TSO is advertised; TCP checksum offload is always enabled.
 */
static void enable_hw_csum(struct adapter *adapter)
{
	if (adapter->port[0].dev->hw_features & NETIF_F_TSO)
		t1_tp_set_ip_checksum_offload(adapter->tp, 1);	/* for TSO only */
	t1_tp_set_tcp_checksum_offload(adapter->tp, 1);
}
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun /*
195*4882a593Smuzhiyun * Things to do upon first use of a card.
196*4882a593Smuzhiyun * This must run with the rtnl lock held.
197*4882a593Smuzhiyun */
/*
 * Things to do upon first use of a card.
 * This must run with the rtnl lock held.
 *
 * Performs one-time hardware module initialization, clears and enables
 * interrupts, and starts the SGE.  Returns 0 on success or a negative errno.
 */
static int cxgb_up(struct adapter *adapter)
{
	int err = 0;

	/* Heavy hardware init happens only on the very first open. */
	if (!(adapter->flags & FULL_INIT_DONE)) {
		err = t1_init_hw_modules(adapter);
		if (err)
			goto out_err;

		enable_hw_csum(adapter);
		adapter->flags |= FULL_INIT_DONE;
	}

	t1_interrupts_clear(adapter);

	/* Prefer MSI; fall back to shared legacy interrupts if unavailable. */
	adapter->params.has_msi = !disable_msi && !pci_enable_msi(adapter->pdev);
	err = request_irq(adapter->pdev->irq, t1_interrupt,
			  adapter->params.has_msi ? 0 : IRQF_SHARED,
			  adapter->name, adapter);
	if (err) {
		if (adapter->params.has_msi)
			pci_disable_msi(adapter->pdev);

		goto out_err;
	}

	t1_sge_start(adapter->sge);
	t1_interrupts_enable(adapter);
out_err:
	return err;
}
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun /*
231*4882a593Smuzhiyun * Release resources when all the ports have been stopped.
232*4882a593Smuzhiyun */
/*
 * Release resources when all the ports have been stopped.
 * Stops the SGE, disables and frees the interrupt, and releases MSI if used.
 */
static void cxgb_down(struct adapter *adapter)
{
	t1_sge_stop(adapter->sge);
	t1_interrupts_disable(adapter);
	free_irq(adapter->pdev->irq, adapter);
	if (adapter->params.has_msi)
		pci_disable_msi(adapter->pdev);
}
241*4882a593Smuzhiyun
/* ndo_open handler: bring the adapter up on first open, then start the port. */
static int cxgb_open(struct net_device *dev)
{
	int err;
	struct adapter *adapter = dev->ml_priv;
	/* Snapshot of other open ports, taken before we set our own bit. */
	int other_ports = adapter->open_device_map & PORT_MASK;

	napi_enable(&adapter->napi);
	/* First port to open brings up the whole adapter. */
	if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0) {
		napi_disable(&adapter->napi);
		return err;
	}

	__set_bit(dev->if_port, &adapter->open_device_map);
	link_start(&adapter->port[dev->if_port]);
	netif_start_queue(dev);
	/* Kick off periodic stats collection when the first port opens. */
	if (!other_ports && adapter->params.stats_update_period)
		schedule_mac_stats_update(adapter,
					  adapter->params.stats_update_period);

	t1_vlan_mode(adapter, dev->features);
	return 0;
}
264*4882a593Smuzhiyun
/* ndo_stop handler: quiesce the port; take the adapter down on last close. */
static int cxgb_close(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct cmac *mac = p->mac;

	netif_stop_queue(dev);
	napi_disable(&adapter->napi);
	mac->ops->disable(mac, MAC_DIRECTION_TX | MAC_DIRECTION_RX);
	netif_carrier_off(dev);

	clear_bit(dev->if_port, &adapter->open_device_map);
	if (adapter->params.stats_update_period &&
	    !(adapter->open_device_map & PORT_MASK)) {
		/* Stop statistics accumulation. */
		smp_mb__after_atomic();
		/*
		 * Taking and dropping work_lock lets any concurrently running
		 * stats update task drain before we cancel the delayed work
		 * (cancel_delayed_work() does not wait for a running item).
		 */
		spin_lock(&adapter->work_lock);	/* sync with update task */
		spin_unlock(&adapter->work_lock);
		cancel_mac_stats_update(adapter);
	}

	/* Last port closed: tear the adapter down. */
	if (!adapter->open_device_map)
		cxgb_down(adapter);
	return 0;
}
290*4882a593Smuzhiyun
/*
 * ndo_get_stats handler: map the MAC's hardware counters onto the standard
 * struct net_device_stats fields.
 */
static struct net_device_stats *t1_get_stats(struct net_device *dev)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct net_device_stats *ns = &dev->stats;
	const struct cmac_statistics *pstats;

	/* Do a full update of the MAC stats */
	pstats = p->mac->ops->statistics_update(p->mac,
						MAC_STATS_UPDATE_FULL);

	ns->tx_packets = pstats->TxUnicastFramesOK +
		pstats->TxMulticastFramesOK + pstats->TxBroadcastFramesOK;

	ns->rx_packets = pstats->RxUnicastFramesOK +
		pstats->RxMulticastFramesOK + pstats->RxBroadcastFramesOK;

	ns->tx_bytes = pstats->TxOctetsOK;
	ns->rx_bytes = pstats->RxOctetsOK;

	ns->tx_errors = pstats->TxLateCollisions + pstats->TxLengthErrors +
		pstats->TxUnderrun + pstats->TxFramesAbortedDueToXSCollisions;
	ns->rx_errors = pstats->RxDataErrors + pstats->RxJabberErrors +
		pstats->RxFCSErrors + pstats->RxAlignErrors +
		pstats->RxSequenceErrors + pstats->RxFrameTooLongErrors +
		pstats->RxSymbolErrors + pstats->RxRuntErrors;

	ns->multicast  = pstats->RxMulticastFramesOK;
	ns->collisions = pstats->TxTotalCollisions;

	/* detailed rx_errors */
	ns->rx_length_errors = pstats->RxFrameTooLongErrors +
		pstats->RxJabberErrors;
	ns->rx_over_errors   = 0;	/* not tracked by this hardware */
	ns->rx_crc_errors    = pstats->RxFCSErrors;
	ns->rx_frame_errors  = pstats->RxAlignErrors;
	ns->rx_fifo_errors   = 0;	/* not tracked by this hardware */
	ns->rx_missed_errors = 0;	/* not tracked by this hardware */

	/* detailed tx_errors */
	ns->tx_aborted_errors   = pstats->TxFramesAbortedDueToXSCollisions;
	ns->tx_carrier_errors   = 0;	/* not tracked by this hardware */
	ns->tx_fifo_errors      = pstats->TxUnderrun;
	ns->tx_heartbeat_errors = 0;	/* not tracked by this hardware */
	ns->tx_window_errors    = pstats->TxLateCollisions;
	return ns;
}
338*4882a593Smuzhiyun
get_msglevel(struct net_device * dev)339*4882a593Smuzhiyun static u32 get_msglevel(struct net_device *dev)
340*4882a593Smuzhiyun {
341*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
342*4882a593Smuzhiyun
343*4882a593Smuzhiyun return adapter->msg_enable;
344*4882a593Smuzhiyun }
345*4882a593Smuzhiyun
/* ethtool set_msglevel: replace the adapter's debug message bitmap. */
static void set_msglevel(struct net_device *dev, u32 val)
{
	struct adapter *adapter = dev->ml_priv;

	adapter->msg_enable = val;
}
352*4882a593Smuzhiyun
353*4882a593Smuzhiyun static const char stats_strings[][ETH_GSTRING_LEN] = {
354*4882a593Smuzhiyun "TxOctetsOK",
355*4882a593Smuzhiyun "TxOctetsBad",
356*4882a593Smuzhiyun "TxUnicastFramesOK",
357*4882a593Smuzhiyun "TxMulticastFramesOK",
358*4882a593Smuzhiyun "TxBroadcastFramesOK",
359*4882a593Smuzhiyun "TxPauseFrames",
360*4882a593Smuzhiyun "TxFramesWithDeferredXmissions",
361*4882a593Smuzhiyun "TxLateCollisions",
362*4882a593Smuzhiyun "TxTotalCollisions",
363*4882a593Smuzhiyun "TxFramesAbortedDueToXSCollisions",
364*4882a593Smuzhiyun "TxUnderrun",
365*4882a593Smuzhiyun "TxLengthErrors",
366*4882a593Smuzhiyun "TxInternalMACXmitError",
367*4882a593Smuzhiyun "TxFramesWithExcessiveDeferral",
368*4882a593Smuzhiyun "TxFCSErrors",
369*4882a593Smuzhiyun "TxJumboFramesOk",
370*4882a593Smuzhiyun "TxJumboOctetsOk",
371*4882a593Smuzhiyun
372*4882a593Smuzhiyun "RxOctetsOK",
373*4882a593Smuzhiyun "RxOctetsBad",
374*4882a593Smuzhiyun "RxUnicastFramesOK",
375*4882a593Smuzhiyun "RxMulticastFramesOK",
376*4882a593Smuzhiyun "RxBroadcastFramesOK",
377*4882a593Smuzhiyun "RxPauseFrames",
378*4882a593Smuzhiyun "RxFCSErrors",
379*4882a593Smuzhiyun "RxAlignErrors",
380*4882a593Smuzhiyun "RxSymbolErrors",
381*4882a593Smuzhiyun "RxDataErrors",
382*4882a593Smuzhiyun "RxSequenceErrors",
383*4882a593Smuzhiyun "RxRuntErrors",
384*4882a593Smuzhiyun "RxJabberErrors",
385*4882a593Smuzhiyun "RxInternalMACRcvError",
386*4882a593Smuzhiyun "RxInRangeLengthErrors",
387*4882a593Smuzhiyun "RxOutOfRangeLengthField",
388*4882a593Smuzhiyun "RxFrameTooLongErrors",
389*4882a593Smuzhiyun "RxJumboFramesOk",
390*4882a593Smuzhiyun "RxJumboOctetsOk",
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun /* Port stats */
393*4882a593Smuzhiyun "RxCsumGood",
394*4882a593Smuzhiyun "TxCsumOffload",
395*4882a593Smuzhiyun "TxTso",
396*4882a593Smuzhiyun "RxVlan",
397*4882a593Smuzhiyun "TxVlan",
398*4882a593Smuzhiyun "TxNeedHeadroom",
399*4882a593Smuzhiyun
400*4882a593Smuzhiyun /* Interrupt stats */
401*4882a593Smuzhiyun "rx drops",
402*4882a593Smuzhiyun "pure_rsps",
403*4882a593Smuzhiyun "unhandled irqs",
404*4882a593Smuzhiyun "respQ_empty",
405*4882a593Smuzhiyun "respQ_overflow",
406*4882a593Smuzhiyun "freelistQ_empty",
407*4882a593Smuzhiyun "pkt_too_big",
408*4882a593Smuzhiyun "pkt_mismatch",
409*4882a593Smuzhiyun "cmdQ_full0",
410*4882a593Smuzhiyun "cmdQ_full1",
411*4882a593Smuzhiyun
412*4882a593Smuzhiyun "espi_DIP2ParityErr",
413*4882a593Smuzhiyun "espi_DIP4Err",
414*4882a593Smuzhiyun "espi_RxDrops",
415*4882a593Smuzhiyun "espi_TxDrops",
416*4882a593Smuzhiyun "espi_RxOvfl",
417*4882a593Smuzhiyun "espi_ParityErr"
418*4882a593Smuzhiyun };
419*4882a593Smuzhiyun
/* Size of the register dump returned by get_regs(). */
#define T2_REGMAP_SIZE (3 * 1024)

/* ethtool get_regs_len: fixed-size register map. */
static int get_regs_len(struct net_device *dev)
{
	return T2_REGMAP_SIZE;
}
426*4882a593Smuzhiyun
/* ethtool get_drvinfo: report driver name and PCI bus location. */
static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct adapter *adapter = dev->ml_priv;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}
435*4882a593Smuzhiyun
get_sset_count(struct net_device * dev,int sset)436*4882a593Smuzhiyun static int get_sset_count(struct net_device *dev, int sset)
437*4882a593Smuzhiyun {
438*4882a593Smuzhiyun switch (sset) {
439*4882a593Smuzhiyun case ETH_SS_STATS:
440*4882a593Smuzhiyun return ARRAY_SIZE(stats_strings);
441*4882a593Smuzhiyun default:
442*4882a593Smuzhiyun return -EOPNOTSUPP;
443*4882a593Smuzhiyun }
444*4882a593Smuzhiyun }
445*4882a593Smuzhiyun
/* ethtool get_strings: copy out the statistics labels. */
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, stats_strings, sizeof(stats_strings));
}
451*4882a593Smuzhiyun
/*
 * ethtool get_ethtool_stats: gather MAC, SGE port, SGE interrupt, and
 * (when present) ESPI counters.
 *
 * NOTE: the write order below must match stats_strings[] exactly; any
 * reordering here corrupts the labels users see.
 */
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
		      u64 *data)
{
	struct adapter *adapter = dev->ml_priv;
	struct cmac *mac = adapter->port[dev->if_port].mac;
	const struct cmac_statistics *s;
	const struct sge_intr_counts *t;
	struct sge_port_stats ss;

	s = mac->ops->statistics_update(mac, MAC_STATS_UPDATE_FULL);
	t = t1_sge_get_intr_counts(adapter->sge);
	t1_sge_get_port_stats(adapter->sge, dev->if_port, &ss);

	/* MAC Tx counters */
	*data++ = s->TxOctetsOK;
	*data++ = s->TxOctetsBad;
	*data++ = s->TxUnicastFramesOK;
	*data++ = s->TxMulticastFramesOK;
	*data++ = s->TxBroadcastFramesOK;
	*data++ = s->TxPauseFrames;
	*data++ = s->TxFramesWithDeferredXmissions;
	*data++ = s->TxLateCollisions;
	*data++ = s->TxTotalCollisions;
	*data++ = s->TxFramesAbortedDueToXSCollisions;
	*data++ = s->TxUnderrun;
	*data++ = s->TxLengthErrors;
	*data++ = s->TxInternalMACXmitError;
	*data++ = s->TxFramesWithExcessiveDeferral;
	*data++ = s->TxFCSErrors;
	*data++ = s->TxJumboFramesOK;
	*data++ = s->TxJumboOctetsOK;

	/* MAC Rx counters */
	*data++ = s->RxOctetsOK;
	*data++ = s->RxOctetsBad;
	*data++ = s->RxUnicastFramesOK;
	*data++ = s->RxMulticastFramesOK;
	*data++ = s->RxBroadcastFramesOK;
	*data++ = s->RxPauseFrames;
	*data++ = s->RxFCSErrors;
	*data++ = s->RxAlignErrors;
	*data++ = s->RxSymbolErrors;
	*data++ = s->RxDataErrors;
	*data++ = s->RxSequenceErrors;
	*data++ = s->RxRuntErrors;
	*data++ = s->RxJabberErrors;
	*data++ = s->RxInternalMACRcvError;
	*data++ = s->RxInRangeLengthErrors;
	*data++ = s->RxOutOfRangeLengthField;
	*data++ = s->RxFrameTooLongErrors;
	*data++ = s->RxJumboFramesOK;
	*data++ = s->RxJumboOctetsOK;

	/* SGE per-port counters */
	*data++ = ss.rx_cso_good;
	*data++ = ss.tx_cso;
	*data++ = ss.tx_tso;
	*data++ = ss.vlan_xtract;
	*data++ = ss.vlan_insert;
	*data++ = ss.tx_need_hdrroom;

	/* SGE interrupt counters */
	*data++ = t->rx_drops;
	*data++ = t->pure_rsps;
	*data++ = t->unhandled_irqs;
	*data++ = t->respQ_empty;
	*data++ = t->respQ_overflow;
	*data++ = t->freelistQ_empty;
	*data++ = t->pkt_too_big;
	*data++ = t->pkt_mismatch;
	*data++ = t->cmdQ_full[0];
	*data++ = t->cmdQ_full[1];

	/* ESPI counters, only on adapters that have an ESPI block */
	if (adapter->espi) {
		const struct espi_intr_counts *e;

		e = t1_espi_get_intr_counts(adapter->espi);
		*data++ = e->DIP2_parity_err;
		*data++ = e->DIP4_err;
		*data++ = e->rx_drops;
		*data++ = e->tx_drops;
		*data++ = e->rx_ovflw;
		*data++ = e->parity_err;
	}
}
533*4882a593Smuzhiyun
reg_block_dump(struct adapter * ap,void * buf,unsigned int start,unsigned int end)534*4882a593Smuzhiyun static inline void reg_block_dump(struct adapter *ap, void *buf,
535*4882a593Smuzhiyun unsigned int start, unsigned int end)
536*4882a593Smuzhiyun {
537*4882a593Smuzhiyun u32 *p = buf + start;
538*4882a593Smuzhiyun
539*4882a593Smuzhiyun for ( ; start <= end; start += sizeof(u32))
540*4882a593Smuzhiyun *p++ = readl(ap->regs + start);
541*4882a593Smuzhiyun }
542*4882a593Smuzhiyun
/*
 * ethtool get_regs: dump the chip's register blocks into a zeroed
 * T2_REGMAP_SIZE buffer, each block at its hardware byte offset.
 */
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
		     void *buf)
{
	struct adapter *ap = dev->ml_priv;

	/*
	 * Version scheme: bits 0..9: chip version, bits 10..15: chip revision
	 */
	regs->version = 2;

	memset(buf, 0, T2_REGMAP_SIZE);
	reg_block_dump(ap, buf, 0, A_SG_RESPACCUTIMER);
	reg_block_dump(ap, buf, A_MC3_CFG, A_MC4_INT_CAUSE);
	reg_block_dump(ap, buf, A_TPI_ADDR, A_TPI_PAR);
	reg_block_dump(ap, buf, A_TP_IN_CONFIG, A_TP_TX_DROP_COUNT);
	reg_block_dump(ap, buf, A_RAT_ROUTE_CONTROL, A_RAT_INTR_CAUSE);
	reg_block_dump(ap, buf, A_CSPI_RX_AE_WM, A_CSPI_INTR_ENABLE);
	reg_block_dump(ap, buf, A_ESPI_SCH_TOKEN0, A_ESPI_GOSTAT);
	reg_block_dump(ap, buf, A_ULP_ULIMIT, A_ULP_PIO_CTRL);
	reg_block_dump(ap, buf, A_PL_ENABLE, A_PL_CAUSE);
	reg_block_dump(ap, buf, A_MC5_CONFIG, A_MC5_MASK_WRITE_CMD);
}
565*4882a593Smuzhiyun
/*
 * ethtool get_link_ksettings: report the port's link capabilities and,
 * if the carrier is up, the negotiated speed/duplex.
 */
static int get_link_ksettings(struct net_device *dev,
			      struct ethtool_link_ksettings *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	u32 supported, advertising;

	supported = p->link_config.supported;
	advertising = p->link_config.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->base.speed = p->link_config.speed;
		cmd->base.duplex = p->link_config.duplex;
	} else {
		/* Link down: speed/duplex are meaningless. */
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	cmd->base.port = (supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
	cmd->base.phy_address = p->phy->mdio.prtad;
	cmd->base.autoneg = p->link_config.autoneg;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
595*4882a593Smuzhiyun
speed_duplex_to_caps(int speed,int duplex)596*4882a593Smuzhiyun static int speed_duplex_to_caps(int speed, int duplex)
597*4882a593Smuzhiyun {
598*4882a593Smuzhiyun int cap = 0;
599*4882a593Smuzhiyun
600*4882a593Smuzhiyun switch (speed) {
601*4882a593Smuzhiyun case SPEED_10:
602*4882a593Smuzhiyun if (duplex == DUPLEX_FULL)
603*4882a593Smuzhiyun cap = SUPPORTED_10baseT_Full;
604*4882a593Smuzhiyun else
605*4882a593Smuzhiyun cap = SUPPORTED_10baseT_Half;
606*4882a593Smuzhiyun break;
607*4882a593Smuzhiyun case SPEED_100:
608*4882a593Smuzhiyun if (duplex == DUPLEX_FULL)
609*4882a593Smuzhiyun cap = SUPPORTED_100baseT_Full;
610*4882a593Smuzhiyun else
611*4882a593Smuzhiyun cap = SUPPORTED_100baseT_Half;
612*4882a593Smuzhiyun break;
613*4882a593Smuzhiyun case SPEED_1000:
614*4882a593Smuzhiyun if (duplex == DUPLEX_FULL)
615*4882a593Smuzhiyun cap = SUPPORTED_1000baseT_Full;
616*4882a593Smuzhiyun else
617*4882a593Smuzhiyun cap = SUPPORTED_1000baseT_Half;
618*4882a593Smuzhiyun break;
619*4882a593Smuzhiyun case SPEED_10000:
620*4882a593Smuzhiyun if (duplex == DUPLEX_FULL)
621*4882a593Smuzhiyun cap = SUPPORTED_10000baseT_Full;
622*4882a593Smuzhiyun }
623*4882a593Smuzhiyun return cap;
624*4882a593Smuzhiyun }
625*4882a593Smuzhiyun
/* Link modes this driver knows how to advertise. */
#define ADVERTISED_MASK (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
			 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
			 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full | \
			 ADVERTISED_10000baseT_Full)

/*
 * ethtool set_link_ksettings: configure forced speed/duplex or the
 * autonegotiation advertisement, then restart the link if it is running.
 */
static int set_link_ksettings(struct net_device *dev,
			      const struct ethtool_link_ksettings *cmd)
{
	struct adapter *adapter = dev->ml_priv;
	struct port_info *p = &adapter->port[dev->if_port];
	struct link_config *lc = &p->link_config;
	u32 advertising;

	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if (!(lc->supported & SUPPORTED_Autoneg))
		return -EOPNOTSUPP;	/* can't change speed/duplex */

	if (cmd->base.autoneg == AUTONEG_DISABLE) {
		u32 speed = cmd->base.speed;
		int cap = speed_duplex_to_caps(speed, cmd->base.duplex);

		/* Forced-gigabit is rejected: 1000BASE-T requires autoneg. */
		if (!(lc->supported & cap) || (speed == SPEED_1000))
			return -EINVAL;
		lc->requested_speed = speed;
		lc->requested_duplex = cmd->base.duplex;
		lc->advertising = 0;
	} else {
		advertising &= ADVERTISED_MASK;
		/* More than one mode requested: fall back to everything we support. */
		if (advertising & (advertising - 1))
			advertising = lc->supported;
		advertising &= lc->supported;
		if (!advertising)
			return -EINVAL;
		lc->requested_speed = SPEED_INVALID;
		lc->requested_duplex = DUPLEX_INVALID;
		lc->advertising = advertising | ADVERTISED_Autoneg;
	}
	lc->autoneg = cmd->base.autoneg;
	if (netif_running(dev))
		t1_link_start(p->phy, p->mac, lc);
	return 0;
}
670*4882a593Smuzhiyun
get_pauseparam(struct net_device * dev,struct ethtool_pauseparam * epause)671*4882a593Smuzhiyun static void get_pauseparam(struct net_device *dev,
672*4882a593Smuzhiyun struct ethtool_pauseparam *epause)
673*4882a593Smuzhiyun {
674*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
675*4882a593Smuzhiyun struct port_info *p = &adapter->port[dev->if_port];
676*4882a593Smuzhiyun
677*4882a593Smuzhiyun epause->autoneg = (p->link_config.requested_fc & PAUSE_AUTONEG) != 0;
678*4882a593Smuzhiyun epause->rx_pause = (p->link_config.fc & PAUSE_RX) != 0;
679*4882a593Smuzhiyun epause->tx_pause = (p->link_config.fc & PAUSE_TX) != 0;
680*4882a593Smuzhiyun }
681*4882a593Smuzhiyun
set_pauseparam(struct net_device * dev,struct ethtool_pauseparam * epause)682*4882a593Smuzhiyun static int set_pauseparam(struct net_device *dev,
683*4882a593Smuzhiyun struct ethtool_pauseparam *epause)
684*4882a593Smuzhiyun {
685*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
686*4882a593Smuzhiyun struct port_info *p = &adapter->port[dev->if_port];
687*4882a593Smuzhiyun struct link_config *lc = &p->link_config;
688*4882a593Smuzhiyun
689*4882a593Smuzhiyun if (epause->autoneg == AUTONEG_DISABLE)
690*4882a593Smuzhiyun lc->requested_fc = 0;
691*4882a593Smuzhiyun else if (lc->supported & SUPPORTED_Autoneg)
692*4882a593Smuzhiyun lc->requested_fc = PAUSE_AUTONEG;
693*4882a593Smuzhiyun else
694*4882a593Smuzhiyun return -EINVAL;
695*4882a593Smuzhiyun
696*4882a593Smuzhiyun if (epause->rx_pause)
697*4882a593Smuzhiyun lc->requested_fc |= PAUSE_RX;
698*4882a593Smuzhiyun if (epause->tx_pause)
699*4882a593Smuzhiyun lc->requested_fc |= PAUSE_TX;
700*4882a593Smuzhiyun if (lc->autoneg == AUTONEG_ENABLE) {
701*4882a593Smuzhiyun if (netif_running(dev))
702*4882a593Smuzhiyun t1_link_start(p->phy, p->mac, lc);
703*4882a593Smuzhiyun } else {
704*4882a593Smuzhiyun lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
705*4882a593Smuzhiyun if (netif_running(dev))
706*4882a593Smuzhiyun p->mac->ops->set_speed_duplex_fc(p->mac, -1, -1,
707*4882a593Smuzhiyun lc->fc);
708*4882a593Smuzhiyun }
709*4882a593Smuzhiyun return 0;
710*4882a593Smuzhiyun }
711*4882a593Smuzhiyun
get_sge_param(struct net_device * dev,struct ethtool_ringparam * e)712*4882a593Smuzhiyun static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
713*4882a593Smuzhiyun {
714*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
715*4882a593Smuzhiyun int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
716*4882a593Smuzhiyun
717*4882a593Smuzhiyun e->rx_max_pending = MAX_RX_BUFFERS;
718*4882a593Smuzhiyun e->rx_jumbo_max_pending = MAX_RX_JUMBO_BUFFERS;
719*4882a593Smuzhiyun e->tx_max_pending = MAX_CMDQ_ENTRIES;
720*4882a593Smuzhiyun
721*4882a593Smuzhiyun e->rx_pending = adapter->params.sge.freelQ_size[!jumbo_fl];
722*4882a593Smuzhiyun e->rx_jumbo_pending = adapter->params.sge.freelQ_size[jumbo_fl];
723*4882a593Smuzhiyun e->tx_pending = adapter->params.sge.cmdQ_size[0];
724*4882a593Smuzhiyun }
725*4882a593Smuzhiyun
set_sge_param(struct net_device * dev,struct ethtool_ringparam * e)726*4882a593Smuzhiyun static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
727*4882a593Smuzhiyun {
728*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
729*4882a593Smuzhiyun int jumbo_fl = t1_is_T1B(adapter) ? 1 : 0;
730*4882a593Smuzhiyun
731*4882a593Smuzhiyun if (e->rx_pending > MAX_RX_BUFFERS || e->rx_mini_pending ||
732*4882a593Smuzhiyun e->rx_jumbo_pending > MAX_RX_JUMBO_BUFFERS ||
733*4882a593Smuzhiyun e->tx_pending > MAX_CMDQ_ENTRIES ||
734*4882a593Smuzhiyun e->rx_pending < MIN_FL_ENTRIES ||
735*4882a593Smuzhiyun e->rx_jumbo_pending < MIN_FL_ENTRIES ||
736*4882a593Smuzhiyun e->tx_pending < (adapter->params.nports + 1) * (MAX_SKB_FRAGS + 1))
737*4882a593Smuzhiyun return -EINVAL;
738*4882a593Smuzhiyun
739*4882a593Smuzhiyun if (adapter->flags & FULL_INIT_DONE)
740*4882a593Smuzhiyun return -EBUSY;
741*4882a593Smuzhiyun
742*4882a593Smuzhiyun adapter->params.sge.freelQ_size[!jumbo_fl] = e->rx_pending;
743*4882a593Smuzhiyun adapter->params.sge.freelQ_size[jumbo_fl] = e->rx_jumbo_pending;
744*4882a593Smuzhiyun adapter->params.sge.cmdQ_size[0] = e->tx_pending;
745*4882a593Smuzhiyun adapter->params.sge.cmdQ_size[1] = e->tx_pending > MAX_CMDQ1_ENTRIES ?
746*4882a593Smuzhiyun MAX_CMDQ1_ENTRIES : e->tx_pending;
747*4882a593Smuzhiyun return 0;
748*4882a593Smuzhiyun }
749*4882a593Smuzhiyun
set_coalesce(struct net_device * dev,struct ethtool_coalesce * c)750*4882a593Smuzhiyun static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
751*4882a593Smuzhiyun {
752*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
753*4882a593Smuzhiyun
754*4882a593Smuzhiyun adapter->params.sge.rx_coalesce_usecs = c->rx_coalesce_usecs;
755*4882a593Smuzhiyun adapter->params.sge.coalesce_enable = c->use_adaptive_rx_coalesce;
756*4882a593Smuzhiyun adapter->params.sge.sample_interval_usecs = c->rate_sample_interval;
757*4882a593Smuzhiyun t1_sge_set_coalesce_params(adapter->sge, &adapter->params.sge);
758*4882a593Smuzhiyun return 0;
759*4882a593Smuzhiyun }
760*4882a593Smuzhiyun
get_coalesce(struct net_device * dev,struct ethtool_coalesce * c)761*4882a593Smuzhiyun static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
762*4882a593Smuzhiyun {
763*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
764*4882a593Smuzhiyun
765*4882a593Smuzhiyun c->rx_coalesce_usecs = adapter->params.sge.rx_coalesce_usecs;
766*4882a593Smuzhiyun c->rate_sample_interval = adapter->params.sge.sample_interval_usecs;
767*4882a593Smuzhiyun c->use_adaptive_rx_coalesce = adapter->params.sge.coalesce_enable;
768*4882a593Smuzhiyun return 0;
769*4882a593Smuzhiyun }
770*4882a593Smuzhiyun
get_eeprom_len(struct net_device * dev)771*4882a593Smuzhiyun static int get_eeprom_len(struct net_device *dev)
772*4882a593Smuzhiyun {
773*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
774*4882a593Smuzhiyun
775*4882a593Smuzhiyun return t1_is_asic(adapter) ? EEPROM_SIZE : 0;
776*4882a593Smuzhiyun }
777*4882a593Smuzhiyun
/* EEPROM magic cookie: Chelsio PCI vendor id in the low 16 bits, chip
 * version in the bits above. */
#define EEPROM_MAGIC(ap) \
	(PCI_VENDOR_ID_CHELSIO | ((ap)->params.chip_version << 16))

/*
 * ethtool get_eeprom handler: read the requested byte range out of the
 * serial EEPROM.  Whole aligned 32-bit words covering the range are read
 * into a stack buffer and the exact byte span is then copied to @data.
 *
 * NOTE(review): buf is indexed up to e->offset + e->len; this assumes
 * the ethtool core has already clamped the request against
 * get_eeprom_len() (EEPROM_SIZE) — confirm, else buf could overflow.
 */
static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
		      u8 *data)
{
	int i;
	u8 buf[EEPROM_SIZE] __attribute__((aligned(4)));
	struct adapter *adapter = dev->ml_priv;

	e->magic = EEPROM_MAGIC(adapter);
	/* Round the start down to a word boundary and read word by word. */
	for (i = e->offset & ~3; i < e->offset + e->len; i += sizeof(u32))
		t1_seeprom_read(adapter, i, (__le32 *)&buf[i]);
	memcpy(data, buf + e->offset, e->len);
	return 0;
}
794*4882a593Smuzhiyun
/* ethtool entry points exported by this driver. */
static const struct ethtool_ops t1_ethtool_ops = {
	/* Only the coalescing knobs actually handled by set_coalesce(). */
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX |
				     ETHTOOL_COALESCE_RATE_SAMPLE_INTERVAL,
	.get_drvinfo = get_drvinfo,
	.get_msglevel = get_msglevel,
	.set_msglevel = set_msglevel,
	.get_ringparam = get_sge_param,
	.set_ringparam = set_sge_param,
	.get_coalesce = get_coalesce,
	.set_coalesce = set_coalesce,
	.get_eeprom_len = get_eeprom_len,
	.get_eeprom = get_eeprom,
	.get_pauseparam = get_pauseparam,
	.set_pauseparam = set_pauseparam,
	.get_link = ethtool_op_get_link,
	.get_strings = get_strings,
	.get_sset_count = get_sset_count,
	.get_ethtool_stats = get_stats,
	.get_regs_len = get_regs_len,
	.get_regs = get_regs,
	.get_link_ksettings = get_link_ksettings,
	.set_link_ksettings = set_link_ksettings,
};
819*4882a593Smuzhiyun
t1_ioctl(struct net_device * dev,struct ifreq * req,int cmd)820*4882a593Smuzhiyun static int t1_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
821*4882a593Smuzhiyun {
822*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
823*4882a593Smuzhiyun struct mdio_if_info *mdio = &adapter->port[dev->if_port].phy->mdio;
824*4882a593Smuzhiyun
825*4882a593Smuzhiyun return mdio_mii_ioctl(mdio, if_mii(req), cmd);
826*4882a593Smuzhiyun }
827*4882a593Smuzhiyun
t1_change_mtu(struct net_device * dev,int new_mtu)828*4882a593Smuzhiyun static int t1_change_mtu(struct net_device *dev, int new_mtu)
829*4882a593Smuzhiyun {
830*4882a593Smuzhiyun int ret;
831*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
832*4882a593Smuzhiyun struct cmac *mac = adapter->port[dev->if_port].mac;
833*4882a593Smuzhiyun
834*4882a593Smuzhiyun if (!mac->ops->set_mtu)
835*4882a593Smuzhiyun return -EOPNOTSUPP;
836*4882a593Smuzhiyun if ((ret = mac->ops->set_mtu(mac, new_mtu)))
837*4882a593Smuzhiyun return ret;
838*4882a593Smuzhiyun dev->mtu = new_mtu;
839*4882a593Smuzhiyun return 0;
840*4882a593Smuzhiyun }
841*4882a593Smuzhiyun
t1_set_mac_addr(struct net_device * dev,void * p)842*4882a593Smuzhiyun static int t1_set_mac_addr(struct net_device *dev, void *p)
843*4882a593Smuzhiyun {
844*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
845*4882a593Smuzhiyun struct cmac *mac = adapter->port[dev->if_port].mac;
846*4882a593Smuzhiyun struct sockaddr *addr = p;
847*4882a593Smuzhiyun
848*4882a593Smuzhiyun if (!mac->ops->macaddress_set)
849*4882a593Smuzhiyun return -EOPNOTSUPP;
850*4882a593Smuzhiyun
851*4882a593Smuzhiyun memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
852*4882a593Smuzhiyun mac->ops->macaddress_set(mac, dev->dev_addr);
853*4882a593Smuzhiyun return 0;
854*4882a593Smuzhiyun }
855*4882a593Smuzhiyun
/*
 * Feature fixup: rx and tx VLAN acceleration cannot be toggled
 * independently, so force the tx flag to track the rx flag.
 */
static netdev_features_t t1_fix_features(struct net_device *dev,
					 netdev_features_t features)
{
	features &= ~NETIF_F_HW_VLAN_CTAG_TX;
	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		features |= NETIF_F_HW_VLAN_CTAG_TX;

	return features;
}
870*4882a593Smuzhiyun
/*
 * Feature change handler: reprogram the VLAN mode only when the rx VLAN
 * acceleration flag actually flipped.
 */
static int t1_set_features(struct net_device *dev, netdev_features_t features)
{
	struct adapter *adapter = dev->ml_priv;

	if ((dev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
		t1_vlan_mode(adapter, features);

	return 0;
}
881*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
t1_netpoll(struct net_device * dev)882*4882a593Smuzhiyun static void t1_netpoll(struct net_device *dev)
883*4882a593Smuzhiyun {
884*4882a593Smuzhiyun unsigned long flags;
885*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
886*4882a593Smuzhiyun
887*4882a593Smuzhiyun local_irq_save(flags);
888*4882a593Smuzhiyun t1_interrupt(adapter->pdev->irq, adapter);
889*4882a593Smuzhiyun local_irq_restore(flags);
890*4882a593Smuzhiyun }
891*4882a593Smuzhiyun #endif
892*4882a593Smuzhiyun
893*4882a593Smuzhiyun /*
894*4882a593Smuzhiyun * Periodic accumulation of MAC statistics. This is used only if the MAC
895*4882a593Smuzhiyun * does not have any other way to prevent stats counter overflow.
896*4882a593Smuzhiyun */
mac_stats_task(struct work_struct * work)897*4882a593Smuzhiyun static void mac_stats_task(struct work_struct *work)
898*4882a593Smuzhiyun {
899*4882a593Smuzhiyun int i;
900*4882a593Smuzhiyun struct adapter *adapter =
901*4882a593Smuzhiyun container_of(work, struct adapter, stats_update_task.work);
902*4882a593Smuzhiyun
903*4882a593Smuzhiyun for_each_port(adapter, i) {
904*4882a593Smuzhiyun struct port_info *p = &adapter->port[i];
905*4882a593Smuzhiyun
906*4882a593Smuzhiyun if (netif_running(p->dev))
907*4882a593Smuzhiyun p->mac->ops->statistics_update(p->mac,
908*4882a593Smuzhiyun MAC_STATS_UPDATE_FAST);
909*4882a593Smuzhiyun }
910*4882a593Smuzhiyun
911*4882a593Smuzhiyun /* Schedule the next statistics update if any port is active. */
912*4882a593Smuzhiyun spin_lock(&adapter->work_lock);
913*4882a593Smuzhiyun if (adapter->open_device_map & PORT_MASK)
914*4882a593Smuzhiyun schedule_mac_stats_update(adapter,
915*4882a593Smuzhiyun adapter->params.stats_update_period);
916*4882a593Smuzhiyun spin_unlock(&adapter->work_lock);
917*4882a593Smuzhiyun }
918*4882a593Smuzhiyun
/*
 * Processes elmer0 external interrupts in process context.
 *
 * Runs from the work item scheduled by t1_elmer0_ext_intr(), which
 * masked F_PL_INTR_EXT before scheduling us.
 */
static void ext_intr_task(struct work_struct *work)
{
	struct adapter *adapter =
		container_of(work, struct adapter, ext_intr_handler_task);

	t1_elmer0_ext_intr_handler(adapter);

	/* Now reenable external interrupts */
	spin_lock_irq(&adapter->async_lock);
	adapter->slow_intr_mask |= F_PL_INTR_EXT;
	/* Clear the latched EXT cause, then write the new enable mask. */
	writel(F_PL_INTR_EXT, adapter->regs + A_PL_CAUSE);
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	spin_unlock_irq(&adapter->async_lock);
}
937*4882a593Smuzhiyun
/*
 * Interrupt-context handler for elmer0 external interrupts.
 */
void t1_elmer0_ext_intr(struct adapter *adapter)
{
	/*
	 * Schedule a task to handle external interrupts as we require
	 * a process context. We disable EXT interrupts in the interim
	 * and let the task reenable them when it's done.
	 */
	adapter->slow_intr_mask &= ~F_PL_INTR_EXT;
	/* Mask EXT in the enable register, keeping SGE data enabled. */
	writel(adapter->slow_intr_mask | F_PL_INTR_SGE_DATA,
	       adapter->regs + A_PL_ENABLE);
	schedule_work(&adapter->ext_intr_handler_task);
}
953*4882a593Smuzhiyun
/*
 * Handle a fatal hardware error: stop SGE DMA and mask all interrupts so
 * the adapter can do no further harm, then log the condition.  The
 * device is left suspended; no recovery is attempted here.
 */
void t1_fatal_err(struct adapter *adapter)
{
	if (adapter->flags & FULL_INIT_DONE) {
		t1_sge_stop(adapter->sge);
		t1_interrupts_disable(adapter);
	}
	pr_alert("%s: encountered fatal error, operation suspended\n",
		 adapter->name);
}
963*4882a593Smuzhiyun
/* net_device entry points shared by all ports of the adapter. */
static const struct net_device_ops cxgb_netdev_ops = {
	.ndo_open = cxgb_open,
	.ndo_stop = cxgb_close,
	.ndo_start_xmit = t1_start_xmit,
	.ndo_get_stats = t1_get_stats,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = t1_set_rxmode,
	.ndo_do_ioctl = t1_ioctl,
	.ndo_change_mtu = t1_change_mtu,
	.ndo_set_mac_address = t1_set_mac_addr,
	.ndo_fix_features = t1_fix_features,
	.ndo_set_features = t1_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = t1_netpoll,
#endif
};
980*4882a593Smuzhiyun
/*
 * PCI probe for a Chelsio T1 board.
 *
 * Allocates one net_device per port; the shared struct adapter lives in
 * the first port's net_device private area.  Ports whose
 * register_netdev() fails are skipped, but the probe fails unless at
 * least one port registers.  On T1B parts the ASIC/memory clocks are
 * then programmed according to the t1powersave module parameter.
 */
static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int i, err, pci_using_dac = 0;
	unsigned long mmio_start, mmio_len;
	const struct board_info *bi;
	struct adapter *adapter = NULL;
	struct port_info *pi;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* BAR 0 must be a memory-mapped register window. */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("%s: cannot find PCI device memory base address\n",
		       pci_name(pdev));
		err = -ENODEV;
		goto out_disable_pdev;
	}

	/* Try 64-bit DMA first, falling back to a 32-bit mask. */
	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
		pci_using_dac = 1;

		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
			pr_err("%s: unable to obtain 64-bit DMA for coherent allocations\n",
			       pci_name(pdev));
			err = -ENODEV;
			goto out_disable_pdev;
		}

	} else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) {
		pr_err("%s: no usable DMA configuration\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		pr_err("%s: cannot obtain PCI resources\n", pci_name(pdev));
		goto out_disable_pdev;
	}

	pci_set_master(pdev);

	mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);
	bi = t1_get_board_info(ent->driver_data);

	for (i = 0; i < bi->port_number; ++i) {
		struct net_device *netdev;

		/* Only the first netdev carries the adapter in its priv. */
		netdev = alloc_etherdev(adapter ? 0 : sizeof(*adapter));
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		if (!adapter) {
			/* First port: initialize the shared adapter state. */
			adapter = netdev_priv(netdev);
			adapter->pdev = pdev;
			adapter->port[0].dev = netdev; /* so we don't leak it */

			adapter->regs = ioremap(mmio_start, mmio_len);
			if (!adapter->regs) {
				pr_err("%s: cannot map device registers\n",
				       pci_name(pdev));
				err = -ENOMEM;
				goto out_free_dev;
			}

			if (t1_get_board_rev(adapter, bi, &adapter->params)) {
				err = -ENODEV;	/* Can't handle this chip rev */
				goto out_free_dev;
			}

			adapter->name = pci_name(pdev);
			adapter->msg_enable = dflt_msg_enable;
			adapter->mmio_len = mmio_len;

			spin_lock_init(&adapter->tpi_lock);
			spin_lock_init(&adapter->work_lock);
			spin_lock_init(&adapter->async_lock);
			spin_lock_init(&adapter->mac_lock);

			INIT_WORK(&adapter->ext_intr_handler_task,
				  ext_intr_task);
			INIT_DELAYED_WORK(&adapter->stats_update_task,
					  mac_stats_task);

			pci_set_drvdata(pdev, netdev);
		}

		/* Per-port netdev setup. */
		pi = &adapter->port[i];
		pi->dev = netdev;
		netif_carrier_off(netdev);
		netdev->irq = pdev->irq;
		netdev->if_port = i;
		netdev->mem_start = mmio_start;
		netdev->mem_end = mmio_start + mmio_len - 1;
		netdev->ml_priv = adapter;
		netdev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM;
		netdev->features |= NETIF_F_SG | NETIF_F_IP_CSUM |
			NETIF_F_RXCSUM | NETIF_F_LLTX;

		if (pci_using_dac)
			netdev->features |= NETIF_F_HIGHDMA;
		if (vlan_tso_capable(adapter)) {
			netdev->features |=
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX;
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;

			/* T204: disable TSO */
			if (!(is_T2(adapter)) || bi->port_number != 4) {
				netdev->hw_features |= NETIF_F_TSO;
				netdev->features |= NETIF_F_TSO;
			}
		}

		netdev->netdev_ops = &cxgb_netdev_ops;
		/* Reserve headroom for the CPL header prepended on Tx. */
		netdev->hard_header_len += (netdev->hw_features & NETIF_F_TSO) ?
			sizeof(struct cpl_tx_pkt_lso) : sizeof(struct cpl_tx_pkt);

		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);

		netdev->ethtool_ops = &t1_ethtool_ops;

		/* Board-specific MTU ceiling. */
		switch (bi->board) {
		case CHBT_BOARD_CHT110:
		case CHBT_BOARD_N110:
		case CHBT_BOARD_N210:
		case CHBT_BOARD_CHT210:
			netdev->max_mtu = PM3393_MAX_FRAME_SIZE -
					  (ETH_HLEN + ETH_FCS_LEN);
			break;
		case CHBT_BOARD_CHN204:
			netdev->max_mtu = VSC7326_MAX_MTU;
			break;
		default:
			netdev->max_mtu = ETH_DATA_LEN;
			break;
		}
	}

	if (t1_init_sw_modules(adapter, bi) < 0) {
		err = -ENODEV;
		goto out_free_dev;
	}

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for (i = 0; i < bi->port_number; ++i) {
		err = register_netdev(adapter->port[i].dev);
		if (err)
			pr_warn("%s: cannot register net device %s, skipping\n",
				pci_name(pdev), adapter->port[i].dev->name);
		else {
			/*
			 * Change the name we use for messages to the name of
			 * the first successfully registered interface.
			 */
			if (!adapter->registered_device_map)
				adapter->name = adapter->port[i].dev->name;

			__set_bit(i, &adapter->registered_device_map);
		}
	}
	if (!adapter->registered_device_map) {
		pr_err("%s: could not register any net devices\n",
		       pci_name(pdev));
		err = -EINVAL;
		goto out_release_adapter_res;
	}

	pr_info("%s: %s (rev %d), %s %dMHz/%d-bit\n",
		adapter->name, bi->desc, adapter->params.chip_revision,
		adapter->params.pci.is_pcix ? "PCIX" : "PCI",
		adapter->params.pci.speed, adapter->params.pci.width);

	/*
	 * Set the T1B ASIC and memory clocks.
	 */
	if (t1powersave)
		adapter->t1powersave = LCLOCK;	/* HW default is powersave mode. */
	else
		adapter->t1powersave = HCLOCK;
	if (t1_is_T1B(adapter))
		t1_clock(adapter, t1powersave);

	return 0;

	/* Error unwind: release everything acquired so far, in reverse. */
out_release_adapter_res:
	t1_free_sw_modules(adapter);
out_free_dev:
	if (adapter) {
		if (adapter->regs)
			iounmap(adapter->regs);
		for (i = bi->port_number - 1; i >= 0; --i)
			if (adapter->port[i].dev)
				free_netdev(adapter->port[i].dev);
	}
	pci_release_regions(pdev);
out_disable_pdev:
	pci_disable_device(pdev);
	return err;
}
1192*4882a593Smuzhiyun
/*
 * Shift @nbits bits of @bitdata, most-significant bit first, onto the
 * ELMER0 GPO serial interface: each bit is placed on S_DATA, then
 * S_CLOCK is driven low and back high, with 50us delays between steps.
 *
 * Uses the unlocked __t1_tpi_* accessors, so the caller must hold
 * adapter->tpi_lock (see t1_clock()).
 */
static void bit_bang(struct adapter *adapter, int bitdata, int nbits)
{
	int data;
	int i;
	u32 val;

	/* GPO bit positions of the synthesizer's serial clock and data. */
	enum {
		S_CLOCK = 1 << 3,
		S_DATA = 1 << 4
	};

	for (i = (nbits - 1); i > -1; i--) {

		udelay(50);

		/* Merge the current data bit into the GPO register value. */
		data = ((bitdata >> i) & 0x1);
		__t1_tpi_read(adapter, A_ELMER0_GPO, &val);

		if (data)
			val |= S_DATA;
		else
			val &= ~S_DATA;

		udelay(50);

		/* Set SCLOCK low */
		val &= ~S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

		udelay(50);

		/* Write SCLOCK high */
		val |= S_CLOCK;
		__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	}
}
1230*4882a593Smuzhiyun
/*
 * Serially reprogram the T1B core and memory clock synthesizers.
 *
 * Bit 1 of @mode means "query only" (no change).  Otherwise bit 0
 * selects the clock pair: HCLOCK programs the faster dividers
 * (overclock), LCLOCK the slower powersave ones.  Returns 0 on success,
 * -ENODEV for non-T1B chips, and -EALREADY if the ASIC already runs in
 * the requested mode.  The whole serial sequence is performed under
 * tpi_lock so it cannot be interleaved with other TPI accesses.
 */
static int t1_clock(struct adapter *adapter, int mode)
{
	u32 val;
	int M_CORE_VAL;
	int M_MEM_VAL;

	/* Synthesizer divider values and ELMER0 GPO control-bit positions. */
	enum {
		M_CORE_BITS	= 9,
		T_CORE_VAL	= 0,
		T_CORE_BITS	= 2,
		N_CORE_VAL	= 0,
		N_CORE_BITS	= 2,
		M_MEM_BITS	= 9,
		T_MEM_VAL	= 0,
		T_MEM_BITS	= 2,
		N_MEM_VAL	= 0,
		N_MEM_BITS	= 2,
		NP_LOAD		= 1 << 17,
		S_LOAD_MEM	= 1 << 5,
		S_LOAD_CORE	= 1 << 6,
		S_CLOCK		= 1 << 3
	};

	if (!t1_is_T1B(adapter))
		return -ENODEV;	/* Can't re-clock this chip. */

	if (mode & 2)
		return 0;	/* show current mode. */

	if ((adapter->t1powersave & 1) == (mode & 1))
		return -EALREADY;	/* ASIC already running in mode. */

	if ((mode & 1) == HCLOCK) {
		M_CORE_VAL = 0x14;
		M_MEM_VAL = 0x18;
		adapter->t1powersave = HCLOCK;	/* overclock */
	} else {
		M_CORE_VAL = 0xe;
		M_MEM_VAL = 0x10;
		adapter->t1powersave = LCLOCK;	/* underclock */
	}

	/* Don't interrupt this serial stream! */
	spin_lock(&adapter->tpi_lock);

	/* Initialize for ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	val &= ~S_CLOCK;
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the ASIC clock synthesizer */
	bit_bang(adapter, T_CORE_VAL, T_CORE_BITS);
	bit_bang(adapter, N_CORE_VAL, N_CORE_BITS);
	bit_bang(adapter, M_CORE_VAL, M_CORE_BITS);
	udelay(50);

	/* Finish ASIC core */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_CORE;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Initialize for memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= NP_LOAD;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	val &= ~S_CLOCK;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);

	/* Serial program the memory clock synthesizer */
	bit_bang(adapter, T_MEM_VAL, T_MEM_BITS);
	bit_bang(adapter, N_MEM_VAL, N_MEM_BITS);
	bit_bang(adapter, M_MEM_VAL, M_MEM_BITS);
	udelay(50);

	/* Finish memory */
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(50);
	__t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~S_LOAD_MEM;
	udelay(50);
	__t1_tpi_write(adapter, A_ELMER0_GPO, val);

	spin_unlock(&adapter->tpi_lock);

	return 0;
}
1340*4882a593Smuzhiyun
t1_sw_reset(struct pci_dev * pdev)1341*4882a593Smuzhiyun static inline void t1_sw_reset(struct pci_dev *pdev)
1342*4882a593Smuzhiyun {
1343*4882a593Smuzhiyun pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 3);
1344*4882a593Smuzhiyun pci_write_config_dword(pdev, A_PCICFG_PM_CSR, 0);
1345*4882a593Smuzhiyun }
1346*4882a593Smuzhiyun
remove_one(struct pci_dev * pdev)1347*4882a593Smuzhiyun static void remove_one(struct pci_dev *pdev)
1348*4882a593Smuzhiyun {
1349*4882a593Smuzhiyun struct net_device *dev = pci_get_drvdata(pdev);
1350*4882a593Smuzhiyun struct adapter *adapter = dev->ml_priv;
1351*4882a593Smuzhiyun int i;
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun for_each_port(adapter, i) {
1354*4882a593Smuzhiyun if (test_bit(i, &adapter->registered_device_map))
1355*4882a593Smuzhiyun unregister_netdev(adapter->port[i].dev);
1356*4882a593Smuzhiyun }
1357*4882a593Smuzhiyun
1358*4882a593Smuzhiyun t1_free_sw_modules(adapter);
1359*4882a593Smuzhiyun iounmap(adapter->regs);
1360*4882a593Smuzhiyun
1361*4882a593Smuzhiyun while (--i >= 0) {
1362*4882a593Smuzhiyun if (adapter->port[i].dev)
1363*4882a593Smuzhiyun free_netdev(adapter->port[i].dev);
1364*4882a593Smuzhiyun }
1365*4882a593Smuzhiyun
1366*4882a593Smuzhiyun pci_release_regions(pdev);
1367*4882a593Smuzhiyun pci_disable_device(pdev);
1368*4882a593Smuzhiyun t1_sw_reset(pdev);
1369*4882a593Smuzhiyun }
1370*4882a593Smuzhiyun
1371*4882a593Smuzhiyun static struct pci_driver cxgb_pci_driver = {
1372*4882a593Smuzhiyun .name = DRV_NAME,
1373*4882a593Smuzhiyun .id_table = t1_pci_tbl,
1374*4882a593Smuzhiyun .probe = init_one,
1375*4882a593Smuzhiyun .remove = remove_one,
1376*4882a593Smuzhiyun };
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun module_pci_driver(cxgb_pci_driver);
1379