/* epic100.c: A SMC 83c170 EPIC/100 Fast Ethernet driver for Linux. */
/*
	Written/copyright 1997-2001 by Donald Becker.

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.

	This driver is for the SMC83c170/175 "EPIC" series, as used on the
	SMC EtherPower II 9432 PCI adapter, and several CardBus cards.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Information and updates available at
	http://www.scyld.com/network/epic100.html
	[this link no longer provides anything useful -jgarzik]

	---------------------------------------------------------------------

*/

#define DRV_NAME	"epic100"
#define DRV_VERSION	"2.1"
#define DRV_RELDATE	"Sept 11, 2006"

/* The user-configurable values.
   These may be modified when a driver module is loaded.*/

static int debug = 1;			/* 1 normal messages, 0 quiet .. 7 verbose. */

/* Used to pass the full-duplex flag, etc. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for operational efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	256
#define TX_QUEUE_LEN	240	/* Limit ring entries actually used. */
#define RX_RING_SIZE	256
#define TX_TOTAL_SIZE	(TX_RING_SIZE * sizeof(struct epic_tx_desc))
#define RX_TOTAL_SIZE	(RX_RING_SIZE * sizeof(struct epic_rx_desc))
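
/* Because the ring sizes are powers of two, the index arithmetic used
 * throughout this file, e.g.
 *
 *	entry = ep->cur_tx % TX_RING_SIZE;
 *
 * compiles down to a single mask (cur_tx & 255 for a 256-entry ring),
 * and stays correct across cur_tx/dirty_tx wraparound because the ring
 * size divides 2^32 evenly. */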

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer.*/

/* Bytes transferred to chip before transmission starts. */
/* Initial threshold, increased on underflow, rounded down to 4 byte units. */
#define TX_FIFO_THRESH 256
#define RX_FIFO_THRESH 1	/* 0-3: 32, 64, 96, or 128 bytes */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <asm/byteorder.h>

/* These identify the driver base version and may not be removed. */
static char version[] =
DRV_NAME ".c:v1.11 1/7/2001 Written by Donald Becker <becker@scyld.com>";
static char version2[] =
" (unofficial 2.4.x kernel port, version " DRV_VERSION ", " DRV_RELDATE ")";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("SMC 83c170 EPIC series Ethernet driver");
MODULE_LICENSE("GPL");

module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(debug, "EPIC/100 debug level (0-5)");
MODULE_PARM_DESC(options, "EPIC/100: Bits 0-3: media type, bit 4: full duplex");
MODULE_PARM_DESC(rx_copybreak, "EPIC/100 copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(full_duplex, "EPIC/100 full duplex setting(s) (1)");
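
/* Typical module usage (an illustrative sketch, not an exhaustive list):
 *
 *	modprobe epic100 debug=3 rx_copybreak=200
 *	modprobe epic100 options=0x10,0x10	(force full duplex on two cards)
 *
 * options[] and full_duplex[] are per-card settings; rx_copybreak makes
 * received frames shorter than the given byte count be copied into a
 * freshly allocated skbuff rather than handing up the ring buffer. */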

/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the SMC "EPIC/100", the SMC
single-chip Ethernet controllers for PCI.  This chip is used on
the SMC EtherPower II boards.

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS will assign the
PCI INTA signal to a (preferably otherwise unused) system IRQ line.
Note: Kernel versions earlier than 1.3.73 do not support shared PCI
interrupt lines.

III. Driver operation

IIIa. Ring buffers

IIIb. References

http://www.smsc.com/media/Downloads_Public/discontinued/83c171.pdf
http://www.smsc.com/media/Downloads_Public/discontinued/83c175.pdf
http://scyld.com/expert/NWay.html
http://www.national.com/pf/DP/DP83840A.html

IIIc. Errata

*/


enum chip_capability_flags { MII_PWRDWN=1, TYPE2_INTR=2, NO_MII=4 };

#define EPIC_TOTAL_SIZE 0x100
#define USE_IO_OPS 1

#ifdef USE_IO_OPS
#define EPIC_BAR	0
#else
#define EPIC_BAR	1
#endif

typedef enum {
	SMSC_83C170_0,
	SMSC_83C170,
	SMSC_83C175,
} chip_t;


struct epic_chip_info {
	const char *name;
	int drv_flags;		/* Driver use, intended as capability flags. */
};


/* indexed by chip_t */
static const struct epic_chip_info pci_id_tbl[] = {
	{ "SMSC EPIC/100 83c170", TYPE2_INTR | NO_MII | MII_PWRDWN },
	{ "SMSC EPIC/100 83c170", TYPE2_INTR },
	{ "SMSC EPIC/C 83c175", TYPE2_INTR | MII_PWRDWN },
};


static const struct pci_device_id epic_pci_tbl[] = {
	{ 0x10B8, 0x0005, 0x1092, 0x0AB4, 0, 0, SMSC_83C170_0 },
	{ 0x10B8, 0x0005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SMSC_83C170 },
	{ 0x10B8, 0x0006, PCI_ANY_ID, PCI_ANY_ID,
	  PCI_CLASS_NETWORK_ETHERNET << 8, 0xffff00, SMSC_83C175 },
	{ 0,}
};
MODULE_DEVICE_TABLE(pci, epic_pci_tbl);

#define ew16(reg, val)	iowrite16(val, ioaddr + (reg))
#define ew32(reg, val)	iowrite32(val, ioaddr + (reg))
#define er8(reg)	ioread8(ioaddr + (reg))
#define er16(reg)	ioread16(ioaddr + (reg))
#define er32(reg)	ioread32(ioaddr + (reg))

/* Offsets to registers, using the (ugh) SMC names. */
enum epic_registers {
	COMMAND=0, INTSTAT=4, INTMASK=8, GENCTL=0x0C, NVCTL=0x10, EECTL=0x14,
	PCIBurstCnt=0x18,
	TEST1=0x1C, CRCCNT=0x20, ALICNT=0x24, MPCNT=0x28,	/* Rx error counters. */
	MIICtrl=0x30, MIIData=0x34, MIICfg=0x38,
	LAN0=64,					/* MAC address. */
	MC0=80,						/* Multicast filter table. */
	RxCtrl=96, TxCtrl=112, TxSTAT=0x74,
	PRxCDAR=0x84, RxSTAT=0xA4, EarlyRx=0xB0, PTxCDAR=0xC4, TxThresh=0xDC,
};

/* Interrupt register bits, using my own meaningful names. */
enum IntrStatus {
	TxIdle=0x40000, RxIdle=0x20000, IntrSummary=0x010000,
	PCIBusErr170=0x7000, PCIBusErr175=0x1000, PhyEvent175=0x8000,
	RxStarted=0x0800, RxEarlyWarn=0x0400, CntFull=0x0200, TxUnderrun=0x0100,
	TxEmpty=0x0080, TxDone=0x0020, RxError=0x0010,
	RxOverflow=0x0008, RxFull=0x0004, RxHeader=0x0002, RxDone=0x0001,
};
enum CommandBits {
	StopRx=1, StartRx=2, TxQueued=4, RxQueued=8,
	StopTxDMA=0x20, StopRxDMA=0x40, RestartTx=0x80,
};

#define EpicRemoved	0xffffffff	/* Chip failed or removed (CardBus) */

#define EpicNapiEvent	(TxEmpty | TxDone | \
			 RxDone | RxStarted | RxEarlyWarn | RxOverflow | RxFull)
#define EpicNormalEvent	(0x0000ffff & ~EpicNapiEvent)
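
/* A rough map of the interrupt split as wired up below: EpicNapiEvent
 * sources are masked off in epic_interrupt() and drained from epic_poll()
 * in NAPI context, while the remaining "normal" events (counter overflow,
 * Tx underrun, PCI bus errors) are handled directly in the hard interrupt
 * handler. */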

static const u16 media2miictl[16] = {
	0,	0x0C00, 0x0C00, 0x2000, 0x0100, 0x2100, 0, 0,
	0,	0,	0,	0,	0,	0,	0,	0 };
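
/* Reading the nonzero entries in terms of standard BMCR bits (an informal
 * decode, not chip documentation): 0x2000 = BMCR_SPEED100, 0x0100 =
 * BMCR_FULLDPLX, 0x2100 = 100 Mb/s full duplex, and 0x0C00 (BMCR_PDOWN |
 * BMCR_ISOLATE) parks the transceiver for media types that bypass the MII.
 * The table is indexed by dev->if_port & 15 in epic_open(). */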

/*
 * The EPIC100 Rx and Tx buffer descriptors.  Note that these
 * really ARE host-endian; it's not a misannotation.  We tell
 * the card to byteswap them internally on big-endian hosts -
 * look for #ifdef __BIG_ENDIAN in epic_open().
 */

struct epic_tx_desc {
	u32 txstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

struct epic_rx_desc {
	u32 rxstatus;
	u32 bufaddr;
	u32 buflength;
	u32 next;
};

enum desc_status_bits {
	DescOwn=0x8000,
};
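
/* Ownership handshake: the driver sets DescOwn in rxstatus/txstatus to
 * hand a descriptor to the chip; the chip clears it on completion, after
 * which the upper 16 bits of the status word hold the frame length (see
 * epic_start_xmit() and epic_rx() below). */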

#define PRIV_ALIGN	15	/* Required alignment mask */
struct epic_private {
	struct epic_rx_desc *rx_ring;
	struct epic_tx_desc *tx_ring;
	/* The saved address of a sent-in-place packet/buffer, for later freeing. */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];

	dma_addr_t tx_ring_dma;
	dma_addr_t rx_ring_dma;

	/* Ring pointers. */
	spinlock_t lock;			/* Group with Tx control cache line. */
	spinlock_t napi_lock;
	struct napi_struct napi;
	unsigned int cur_tx, dirty_tx;

	unsigned int cur_rx, dirty_rx;
	u32 irq_mask;
	unsigned int rx_buf_sz;			/* Based on MTU+slack. */

	void __iomem *ioaddr;
	struct pci_dev *pci_dev;		/* PCI bus location. */
	int chip_id, chip_flags;

	struct timer_list timer;		/* Media selection timer. */
	int tx_threshold;
	unsigned char mc_filter[8];
	signed char phys[4];			/* MII device addresses. */
	u16 advertising;			/* NWay media advertisement */
	int mii_phy_cnt;
	u32 ethtool_ops_nesting;
	struct mii_if_info mii;
	unsigned int tx_full:1;			/* The Tx queue is full. */
	unsigned int default_port:4;		/* Last dev->if_port value. */
};
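
/* Locking, as visible in this file: ep->lock serializes Tx-ring producer
 * state in epic_start_xmit() (grouped with the Tx fields above), while
 * ep->napi_lock only guards NAPI scheduling in epic_interrupt().  This is
 * an informal summary, not a documented contract. */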

static int epic_open(struct net_device *dev);
static int read_eeprom(struct epic_private *, int);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int loc, int val);
static void epic_restart(struct net_device *dev);
static void epic_timer(struct timer_list *t);
static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue);
static void epic_init_ring(struct net_device *dev);
static netdev_tx_t epic_start_xmit(struct sk_buff *skb,
				   struct net_device *dev);
static int epic_rx(struct net_device *dev, int budget);
static int epic_poll(struct napi_struct *napi, int budget);
static irqreturn_t epic_interrupt(int irq, void *dev_instance);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static const struct ethtool_ops netdev_ethtool_ops;
static int epic_close(struct net_device *dev);
static struct net_device_stats *epic_get_stats(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);

static const struct net_device_ops epic_netdev_ops = {
	.ndo_open		= epic_open,
	.ndo_stop		= epic_close,
	.ndo_start_xmit		= epic_start_xmit,
	.ndo_tx_timeout		= epic_tx_timeout,
	.ndo_get_stats		= epic_get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int epic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int card_idx = -1;
	void __iomem *ioaddr;
	int chip_idx = (int) ent->driver_data;
	struct net_device *dev;
	struct epic_private *ep;
	int i, ret, option = 0, duplex = 0;
	void *ring_space;
	dma_addr_t ring_dma;

	/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	pr_info_once("%s%s\n", version, version2);
#endif

	card_idx++;

	ret = pci_enable_device(pdev);
	if (ret)
		goto out;

	if (pci_resource_len(pdev, 0) < EPIC_TOTAL_SIZE) {
		dev_err(&pdev->dev, "no PCI region space\n");
		ret = -ENODEV;
		goto err_out_disable;
	}

	pci_set_master(pdev);

	ret = pci_request_regions(pdev, DRV_NAME);
	if (ret < 0)
		goto err_out_disable;

	ret = -ENOMEM;

	dev = alloc_etherdev(sizeof(*ep));
	if (!dev)
		goto err_out_free_res;

	SET_NETDEV_DEV(dev, &pdev->dev);

	ioaddr = pci_iomap(pdev, EPIC_BAR, 0);
	if (!ioaddr) {
		dev_err(&pdev->dev, "ioremap failed\n");
		goto err_out_free_netdev;
	}

	pci_set_drvdata(pdev, dev);
	ep = netdev_priv(dev);
	ep->ioaddr = ioaddr;
	ep->mii.dev = dev;
	ep->mii.mdio_read = mdio_read;
	ep->mii.mdio_write = mdio_write;
	ep->mii.phy_id_mask = 0x1f;
	ep->mii.reg_num_mask = 0x1f;

	ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space)
		goto err_out_iounmap;
	ep->tx_ring = ring_space;
	ep->tx_ring_dma = ring_dma;

	ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma,
					GFP_KERNEL);
	if (!ring_space)
		goto err_out_unmap_tx;
	ep->rx_ring = ring_space;
	ep->rx_ring_dma = ring_dma;

	if (dev->mem_start) {
		option = dev->mem_start;
		duplex = (dev->mem_start & 16) ? 1 : 0;
	} else if (card_idx >= 0 && card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
		if (full_duplex[card_idx] >= 0)
			duplex = full_duplex[card_idx];
	}

	spin_lock_init(&ep->lock);
	spin_lock_init(&ep->napi_lock);

	/* Bring the chip out of low-power mode. */
	ew32(GENCTL, 0x4200);
	/* Magic?!  If we don't set this bit the MII interface won't work. */
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Turn on the MII transceiver. */
	ew32(MIICfg, 0x12);
	if (chip_idx == 1)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	ew32(GENCTL, 0x0200);

	/* Note: the '175 does not have a serial EEPROM. */
	for (i = 0; i < 3; i++)
		((__le16 *)dev->dev_addr)[i] = cpu_to_le16(er16(LAN0 + i*4));

	if (debug > 2) {
		dev_dbg(&pdev->dev, "EEPROM contents:\n");
		for (i = 0; i < 64; i++)
			pr_cont(" %4.4x%s", read_eeprom(ep, i),
				i % 16 == 15 ? "\n" : "");
	}

	ep->pci_dev = pdev;
	ep->chip_id = chip_idx;
	ep->chip_flags = pci_id_tbl[chip_idx].drv_flags;
	ep->irq_mask =
		(ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun | EpicNapiEvent;

	/* Find the connected MII xcvrs.
	   Doing this in open() would allow detecting external xcvrs later, but
	   takes much time and no cards have external MII. */
	{
		int phy, phy_idx = 0;
		for (phy = 1; phy < 32 && phy_idx < sizeof(ep->phys); phy++) {
			int mii_status = mdio_read(dev, phy, MII_BMSR);
			if (mii_status != 0xffff && mii_status != 0x0000) {
				ep->phys[phy_idx++] = phy;
				dev_info(&pdev->dev,
					"MII transceiver #%d control "
					"%4.4x status %4.4x.\n",
					phy, mdio_read(dev, phy, 0), mii_status);
			}
		}
		ep->mii_phy_cnt = phy_idx;
		if (phy_idx != 0) {
			phy = ep->phys[0];
			ep->mii.advertising = mdio_read(dev, phy, MII_ADVERTISE);
			dev_info(&pdev->dev,
				"Autonegotiation advertising %4.4x link "
				"partner %4.4x.\n",
				ep->mii.advertising, mdio_read(dev, phy, 5));
		} else if (!(ep->chip_flags & NO_MII)) {
			dev_warn(&pdev->dev,
				"***WARNING***: No MII transceiver found!\n");
			/* Use the known PHY address of the EPII. */
			ep->phys[0] = 3;
		}
		ep->mii.phy_id = ep->phys[0];
	}

	/* Turn off the MII xcvr (175 only!), leave the chip in low-power mode. */
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, er32(NVCTL) & ~0x483c);
	ew32(GENCTL, 0x0008);

	/* The lower four bits are the media type. */
	if (duplex) {
		ep->mii.force_media = ep->mii.full_duplex = 1;
		dev_info(&pdev->dev, "Forced full duplex requested.\n");
	}
	dev->if_port = ep->default_port = option;

	/* The Epic-specific entries in the device structure. */
	dev->netdev_ops = &epic_netdev_ops;
	dev->ethtool_ops = &netdev_ethtool_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	netif_napi_add(dev, &ep->napi, epic_poll, 64);

	ret = register_netdev(dev);
	if (ret < 0)
		goto err_out_unmap_rx;

	netdev_info(dev, "%s at %lx, IRQ %d, %pM\n",
		    pci_id_tbl[chip_idx].name,
		    (long)pci_resource_start(pdev, EPIC_BAR), pdev->irq,
		    dev->dev_addr);

out:
	return ret;

err_out_unmap_rx:
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
			  ep->rx_ring_dma);
err_out_unmap_tx:
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
			  ep->tx_ring_dma);
err_out_iounmap:
	pci_iounmap(pdev, ioaddr);
err_out_free_netdev:
	free_netdev(dev);
err_out_free_res:
	pci_release_regions(pdev);
err_out_disable:
	pci_disable_device(pdev);
	goto out;
}

/* Serial EEPROM section. */

/*  EEPROM_Ctrl bits. */
#define EE_SHIFT_CLK	0x04	/* EEPROM shift clock. */
#define EE_CS		0x02	/* EEPROM chip select. */
#define EE_DATA_WRITE	0x08	/* EEPROM chip data in. */
#define EE_WRITE_0	0x01
#define EE_WRITE_1	0x09
#define EE_DATA_READ	0x10	/* EEPROM chip data out. */
#define EE_ENB		(0x0001 | EE_CS)

/* Delay between EEPROM clock transitions.
   This serves to flush the operation to the PCI bus.
 */

#define eeprom_delay()	er32(EECTL)

/* The EEPROM commands include the always-set leading bit. */
#define EE_WRITE_CMD	(5 << 6)
#define EE_READ64_CMD	(6 << 6)
#define EE_READ256_CMD	(6 << 8)
#define EE_ERASE_CMD	(7 << 6)
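
/* Command framing, inferred from read_eeprom() below: for the common
 * 64x16 part, EE_READ64_CMD (binary 110) is OR-ed with a 6-bit word
 * address and clocked out MSB-first over 13 cycles (the "i = 12" loop),
 * leading zero bits and start bit included; 16 data bits are then
 * clocked back in.  The 256x16 variant takes an 8-bit address, hence
 * the wider (6 << 8) command. */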

static void epic_disable_int(struct net_device *dev, struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, 0x00000000);
}

static inline void __epic_pci_commit(void __iomem *ioaddr)
{
#ifndef USE_IO_OPS
	er32(INTMASK);
#endif
}

static inline void epic_napi_irq_off(struct net_device *dev,
				     struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	ew32(INTMASK, ep->irq_mask & ~EpicNapiEvent);
	__epic_pci_commit(ioaddr);
}

static inline void epic_napi_irq_on(struct net_device *dev,
				    struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;

	/* No need to commit possible posted write */
	ew32(INTMASK, ep->irq_mask | EpicNapiEvent);
}

static int read_eeprom(struct epic_private *ep, int location)
{
	void __iomem *ioaddr = ep->ioaddr;
	int i;
	int retval = 0;
	int read_cmd = location |
		(er32(EECTL) & 0x40 ? EE_READ64_CMD : EE_READ256_CMD);

	ew32(EECTL, EE_ENB & ~EE_CS);
	ew32(EECTL, EE_ENB);

	/* Shift the read command bits out. */
	for (i = 12; i >= 0; i--) {
		short dataval = (read_cmd & (1 << i)) ? EE_WRITE_1 : EE_WRITE_0;
		ew32(EECTL, EE_ENB | dataval);
		eeprom_delay();
		ew32(EECTL, EE_ENB | dataval | EE_SHIFT_CLK);
		eeprom_delay();
	}
	ew32(EECTL, EE_ENB);

	for (i = 16; i > 0; i--) {
		ew32(EECTL, EE_ENB | EE_SHIFT_CLK);
		eeprom_delay();
		retval = (retval << 1) | ((er32(EECTL) & EE_DATA_READ) ? 1 : 0);
		ew32(EECTL, EE_ENB);
		eeprom_delay();
	}

	/* Terminate the EEPROM access. */
	ew32(EECTL, EE_ENB & ~EE_CS);
	return retval;
}

#define MII_READOP	1
#define MII_WRITEOP	2

static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int read_cmd = (phy_id << 9) | (location << 4) | MII_READOP;
	int i;

	ew32(MIICtrl, read_cmd);
	/* Typical operation takes 25 loops. */
	for (i = 400; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_READOP) == 0) {
			/* Work around read failure bug. */
			if (phy_id == 1 && location < 6 &&
			    er16(MIIData) == 0xffff) {
				ew32(MIICtrl, read_cmd);
				continue;
			}
			return er16(MIIData);
		}
	}
	return 0xffff;
}

static void mdio_write(struct net_device *dev, int phy_id, int loc, int value)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	ew16(MIIData, value);
	ew32(MIICtrl, (phy_id << 9) | (loc << 4) | MII_WRITEOP);
	for (i = 10000; i > 0; i--) {
		barrier();
		if ((er32(MIICtrl) & MII_WRITEOP) == 0)
			break;
	}
}


static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	const int irq = ep->pci_dev->irq;
	int rc, i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	napi_enable(&ep->napi);
	rc = request_irq(irq, epic_interrupt, IRQF_SHARED, dev->name, dev);
	if (rc) {
		napi_disable(&ep->napi);
		return rc;
	}

	epic_init_ring(dev);

	ew32(GENCTL, 0x4000);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	/* Tell the chip to byteswap descriptors on big-endian hosts */
#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x4432 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x4412 | (RX_FIFO_THRESH << 8));
	er32(GENCTL);
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);

	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				netdev_info(dev, "Using the 10base2 transceiver, MII status %4.4x.\n",
					    mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (!(mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				netdev_info(dev, "Setting %s-duplex based on MII xcvr %d register read of %4.4x.\n",
					    ep->mii.full_duplex ? "full"
								: "half",
					    ep->phys[0], mii_lpa);
		}
	}

	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	ew32(PRxCDAR, ep->rx_ring_dma);
	ew32(PTxCDAR, ep->tx_ring_dma);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	if (debug > 1) {
		netdev_dbg(dev, "epic_open() ioaddr %p IRQ %d status %4.4x %s-duplex.\n",
			   ioaddr, irq, er32(GENCTL),
			   ep->mii.full_duplex ? "full" : "half");
	}

	/* Set the timer to check for link beat and perhaps switch to an
	   alternate media type. */
	timer_setup(&ep->timer, epic_timer, 0);
	ep->timer.expires = jiffies + 3*HZ;
	add_timer(&ep->timer);

	return rc;
}

/* Reset the chip to recover from a PCI transaction error.
   This may occur at interrupt time. */
static void epic_pause(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	netif_stop_queue(dev);

	/* Disable interrupts by clearing the interrupt mask. */
	ew32(INTMASK, 0x00000000);
	/* Stop the chip's Tx and Rx DMA processes. */
	ew16(COMMAND, StopRx | StopTxDMA | StopRxDMA);

	/* Update the error counts. */
	if (er16(COMMAND) != 0xffff) {
		stats->rx_missed_errors	+= er8(MPCNT);
		stats->rx_frame_errors	+= er8(ALICNT);
		stats->rx_crc_errors	+= er8(CRCCNT);
	}

	/* Remove the packets on the Rx queue. */
	epic_rx(dev, RX_RING_SIZE);
}

static void epic_restart(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int i;

	/* Soft reset the chip. */
	ew32(GENCTL, 0x4001);

	netdev_dbg(dev, "Restarting the EPIC chip, Rx %d/%d Tx %d/%d.\n",
		   ep->cur_rx, ep->dirty_rx, ep->dirty_tx, ep->cur_tx);
	udelay(1);

	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		ew32(TEST1, 0x0008);

#ifdef __BIG_ENDIAN
	ew32(GENCTL, 0x0432 | (RX_FIFO_THRESH << 8));
#else
	ew32(GENCTL, 0x0412 | (RX_FIFO_THRESH << 8));
#endif
	ew32(MIICfg, dev->if_port == 1 ? 0x13 : 0x12);
	if (ep->chip_flags & MII_PWRDWN)
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);

	for (i = 0; i < 3; i++)
		ew32(LAN0 + i*4, le16_to_cpu(((__le16*)dev->dev_addr)[i]));

	ep->tx_threshold = TX_FIFO_THRESH;
	ew32(TxThresh, ep->tx_threshold);
	ew32(TxCtrl, ep->mii.full_duplex ? 0x7f : 0x79);
	ew32(PRxCDAR, ep->rx_ring_dma +
	     (ep->cur_rx % RX_RING_SIZE) * sizeof(struct epic_rx_desc));
	ew32(PTxCDAR, ep->tx_ring_dma +
	     (ep->dirty_tx % TX_RING_SIZE) * sizeof(struct epic_tx_desc));

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	ew32(COMMAND, StartRx | RxQueued);

	/* Enable interrupts by setting the interrupt mask. */
	ew32(INTMASK, RxError | RxHeader | EpicNapiEvent | CntFull |
	     ((ep->chip_flags & TYPE2_INTR) ? PCIBusErr175 : PCIBusErr170) |
	     TxUnderrun);

	netdev_dbg(dev, "epic_restart() done, cmd status %4.4x, ctl %4.4x interrupt %4.4x.\n",
		   er32(COMMAND), er32(GENCTL), er32(INTSTAT));
}

static void check_media(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int mii_lpa = ep->mii_phy_cnt ? mdio_read(dev, ep->phys[0], MII_LPA) : 0;
	int negotiated = mii_lpa & ep->mii.advertising;
	int duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;

	if (ep->mii.force_media)
		return;
	if (mii_lpa == 0xffff)		/* Bogus read */
		return;
	if (ep->mii.full_duplex != duplex) {
		ep->mii.full_duplex = duplex;
		netdev_info(dev, "Setting %s-duplex based on MII #%d link partner capability of %4.4x.\n",
			    ep->mii.full_duplex ? "full" : "half",
			    ep->phys[0], mii_lpa);
		ew32(TxCtrl, ep->mii.full_duplex ? 0x7F : 0x79);
	}
}

static void epic_timer(struct timer_list *t)
{
	struct epic_private *ep = from_timer(ep, t, timer);
	struct net_device *dev = ep->mii.dev;
	void __iomem *ioaddr = ep->ioaddr;
	int next_tick = 5*HZ;

	if (debug > 3) {
		netdev_dbg(dev, "Media monitor tick, Tx status %8.8x.\n",
			   er32(TxSTAT));
		netdev_dbg(dev, "Other registers are IntMask %4.4x IntStatus %4.4x RxStatus %4.4x.\n",
			   er32(INTMASK), er32(INTSTAT), er32(RxSTAT));
	}

	check_media(dev);

	ep->timer.expires = jiffies + next_tick;
	add_timer(&ep->timer);
}

static void epic_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (debug > 0) {
		netdev_warn(dev, "Transmit timeout using MII device, Tx status %4.4x.\n",
			    er16(TxSTAT));
		if (debug > 1) {
			netdev_dbg(dev, "Tx indices: dirty_tx %d, cur_tx %d.\n",
				   ep->dirty_tx, ep->cur_tx);
		}
	}
	if (er16(TxSTAT) & 0x10) {	/* Tx FIFO underflow. */
		dev->stats.tx_fifo_errors++;
		ew32(COMMAND, RestartTx);
	} else {
		epic_restart(dev);
		ew32(COMMAND, TxQueued);
	}

	netif_trans_update(dev); /* prevent tx timeout */
	dev->stats.tx_errors++;
	if (!ep->tx_full)
		netif_wake_queue(dev);
}

/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void epic_init_ring(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	int i;

	ep->tx_full = 0;
	ep->dirty_tx = ep->cur_tx = 0;
	ep->cur_rx = ep->dirty_rx = 0;
	ep->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);

	/* Initialize all Rx descriptors. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		ep->rx_ring[i].rxstatus = 0;
		ep->rx_ring[i].buflength = ep->rx_buf_sz;
		ep->rx_ring[i].next = ep->rx_ring_dma +
				      (i+1)*sizeof(struct epic_rx_desc);
		ep->rx_skbuff[i] = NULL;
	}
	/* Mark the last entry as wrapping the ring. */
	ep->rx_ring[i-1].next = ep->rx_ring_dma;

	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
		ep->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
		ep->rx_ring[i].bufaddr = dma_map_single(&ep->pci_dev->dev,
							skb->data,
							ep->rx_buf_sz,
							DMA_FROM_DEVICE);
		ep->rx_ring[i].rxstatus = DescOwn;
	}
	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

	/* The Tx buffer descriptor is filled in as needed, but we
	   do need to clear the ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		ep->tx_skbuff[i] = NULL;
		ep->tx_ring[i].txstatus = 0x0000;
		ep->tx_ring[i].next = ep->tx_ring_dma +
				      (i+1)*sizeof(struct epic_tx_desc);
	}
	ep->tx_ring[i-1].next = ep->tx_ring_dma;
}

static netdev_tx_t epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
						    skb->data, skb->len,
						    DMA_TO_DEVICE);
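	/* Interrupt mitigation (a summary of the scheme below, not new
	 * behavior): most entries are queued with no Tx-done interrupt
	 * (0x100000); a completion interrupt (0x140000) is requested only
	 * at the half-queue mark and when the ring is about to fill, so a
	 * single IRQ reclaims a batch of finished descriptors. */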
	if (free_count < TX_QUEUE_LEN/2) {	/* Typical path */
		ctrl_word = 0x100000;	/* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = 0x140000;	/* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = 0x100000;	/* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = 0x140000;	/* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | skb->len;
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
			    | DescOwn;

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	ew32(COMMAND, TxQueued);

	if (debug > 4)
		netdev_dbg(dev, "Queued Tx packet size %d to slot %d, flag %2.2x Tx status %8.8x.\n",
			   skb->len, entry, ctrl_word, er32(TxSTAT));

	return NETDEV_TX_OK;
}

static void epic_tx_error(struct net_device *dev, struct epic_private *ep,
			  int status)
{
	struct net_device_stats *stats = &dev->stats;

#ifndef final_version
	/* There was a major error, log it. */
1005*4882a593Smuzhiyun if (debug > 1)
1006*4882a593Smuzhiyun netdev_dbg(dev, "Transmit error, Tx status %8.8x.\n",
1007*4882a593Smuzhiyun status);
1008*4882a593Smuzhiyun #endif
1009*4882a593Smuzhiyun stats->tx_errors++;
1010*4882a593Smuzhiyun if (status & 0x1050)
1011*4882a593Smuzhiyun stats->tx_aborted_errors++;
1012*4882a593Smuzhiyun if (status & 0x0008)
1013*4882a593Smuzhiyun stats->tx_carrier_errors++;
1014*4882a593Smuzhiyun if (status & 0x0040)
1015*4882a593Smuzhiyun stats->tx_window_errors++;
1016*4882a593Smuzhiyun if (status & 0x0010)
1017*4882a593Smuzhiyun stats->tx_fifo_errors++;
1018*4882a593Smuzhiyun }
1019*4882a593Smuzhiyun
epic_tx(struct net_device * dev,struct epic_private * ep)1020*4882a593Smuzhiyun static void epic_tx(struct net_device *dev, struct epic_private *ep)
1021*4882a593Smuzhiyun {
1022*4882a593Smuzhiyun unsigned int dirty_tx, cur_tx;
1023*4882a593Smuzhiyun
1024*4882a593Smuzhiyun /*
1025*4882a593Smuzhiyun * Note: if this lock becomes a problem we can narrow the locked
1026*4882a593Smuzhiyun * region at the cost of occasionally grabbing the lock more times.
1027*4882a593Smuzhiyun */
1028*4882a593Smuzhiyun cur_tx = ep->cur_tx;
1029*4882a593Smuzhiyun for (dirty_tx = ep->dirty_tx; cur_tx - dirty_tx > 0; dirty_tx++) {
1030*4882a593Smuzhiyun struct sk_buff *skb;
1031*4882a593Smuzhiyun int entry = dirty_tx % TX_RING_SIZE;
1032*4882a593Smuzhiyun int txstatus = ep->tx_ring[entry].txstatus;
1033*4882a593Smuzhiyun
1034*4882a593Smuzhiyun if (txstatus & DescOwn)
1035*4882a593Smuzhiyun break; /* It still hasn't been Txed */
1036*4882a593Smuzhiyun
1037*4882a593Smuzhiyun if (likely(txstatus & 0x0001)) {
1038*4882a593Smuzhiyun dev->stats.collisions += (txstatus >> 8) & 15;
1039*4882a593Smuzhiyun dev->stats.tx_packets++;
1040*4882a593Smuzhiyun dev->stats.tx_bytes += ep->tx_skbuff[entry]->len;
1041*4882a593Smuzhiyun } else
1042*4882a593Smuzhiyun epic_tx_error(dev, ep, txstatus);
1043*4882a593Smuzhiyun
1044*4882a593Smuzhiyun /* Free the original skb. */
1045*4882a593Smuzhiyun skb = ep->tx_skbuff[entry];
1046*4882a593Smuzhiyun dma_unmap_single(&ep->pci_dev->dev,
1047*4882a593Smuzhiyun ep->tx_ring[entry].bufaddr, skb->len,
1048*4882a593Smuzhiyun DMA_TO_DEVICE);
1049*4882a593Smuzhiyun dev_consume_skb_irq(skb);
1050*4882a593Smuzhiyun ep->tx_skbuff[entry] = NULL;
1051*4882a593Smuzhiyun }
1052*4882a593Smuzhiyun
1053*4882a593Smuzhiyun #ifndef final_version
1054*4882a593Smuzhiyun if (cur_tx - dirty_tx > TX_RING_SIZE) {
1055*4882a593Smuzhiyun netdev_warn(dev, "Out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
1056*4882a593Smuzhiyun dirty_tx, cur_tx, ep->tx_full);
1057*4882a593Smuzhiyun dirty_tx += TX_RING_SIZE;
1058*4882a593Smuzhiyun }
1059*4882a593Smuzhiyun #endif
1060*4882a593Smuzhiyun ep->dirty_tx = dirty_tx;
1061*4882a593Smuzhiyun if (ep->tx_full && cur_tx - dirty_tx < TX_QUEUE_LEN - 4) {
1062*4882a593Smuzhiyun /* The ring is no longer full, allow new TX entries. */
1063*4882a593Smuzhiyun ep->tx_full = 0;
1064*4882a593Smuzhiyun netif_wake_queue(dev);
1065*4882a593Smuzhiyun }
1066*4882a593Smuzhiyun }

/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static irqreturn_t epic_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	unsigned int handled = 0;
	int status;

	status = er32(INTSTAT);
	/* Acknowledge all of the current interrupt sources ASAP. */
	ew32(INTSTAT, status & EpicNormalEvent);

	if (debug > 4) {
		netdev_dbg(dev, "Interrupt, status=%#8.8x new intstat=%#8.8x.\n",
			   status, er32(INTSTAT));
	}

	if ((status & IntrSummary) == 0)
		goto out;

	handled = 1;

	if (status & EpicNapiEvent) {
		spin_lock(&ep->napi_lock);
		if (napi_schedule_prep(&ep->napi)) {
			epic_napi_irq_off(dev, ep);
			__napi_schedule(&ep->napi);
		}
		spin_unlock(&ep->napi_lock);
	}
	status &= ~EpicNapiEvent;

	/* Check uncommon events all at once. */
	if (status & (CntFull | TxUnderrun | PCIBusErr170 | PCIBusErr175)) {
		struct net_device_stats *stats = &dev->stats;

		if (status == EpicRemoved)
			goto out;

		/* Always update the error counts to avoid overhead later. */
		stats->rx_missed_errors += er8(MPCNT);
		stats->rx_frame_errors += er8(ALICNT);
		stats->rx_crc_errors += er8(CRCCNT);

		if (status & TxUnderrun) { /* Tx FIFO underflow. */
			stats->tx_fifo_errors++;
			ew32(TxThresh, ep->tx_threshold += 128);
			/* Restart the transmit process. */
			ew32(COMMAND, RestartTx);
		}
		if (status & PCIBusErr170) {
			netdev_err(dev, "PCI Bus Error! status %4.4x.\n",
				   status);
			epic_pause(dev);
			epic_restart(dev);
		}
		/* Clear all error sources. */
		ew32(INTSTAT, status & 0x7f18);
	}

out:
	if (debug > 3) {
		netdev_dbg(dev, "exit interrupt, intr_status=%#4.4x.\n",
			   status);
	}

	return IRQ_RETVAL(handled);
}
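
/* Editor's note: the EpicNapiEvent branch above is the standard NAPI
 * hand-off from hard-IRQ context: claim the poll instance, mask the
 * NAPI-related interrupt sources, then let softirq polling drain the
 * rings. A minimal sketch of the same pattern for a hypothetical device
 * (foo_* names are placeholders, not this driver's API):
 */
#if 0	/* illustration only, never compiled */
static irqreturn_t foo_interrupt(int irq, void *dev_instance)
{
	struct foo_priv *fp = netdev_priv(dev_instance);

	if (!foo_irq_pending(fp))
		return IRQ_NONE;
	if (napi_schedule_prep(&fp->napi)) {	/* claim SCHED atomically */
		foo_mask_rx_tx_irqs(fp);	/* quiet the source */
		__napi_schedule(&fp->napi);	/* raise NET_RX_SOFTIRQ */
	}
	return IRQ_HANDLED;
}
#endif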

static int epic_rx(struct net_device *dev, int budget)
{
	struct epic_private *ep = netdev_priv(dev);
	int entry = ep->cur_rx % RX_RING_SIZE;
	int rx_work_limit = ep->dirty_rx + RX_RING_SIZE - ep->cur_rx;
	int work_done = 0;

	if (debug > 4)
		netdev_dbg(dev, " In epic_rx(), entry %d %8.8x.\n", entry,
			   ep->rx_ring[entry].rxstatus);

	if (rx_work_limit > budget)
		rx_work_limit = budget;

	/* If we own the next entry, it's a new packet. Send it up. */
	while ((ep->rx_ring[entry].rxstatus & DescOwn) == 0) {
		int status = ep->rx_ring[entry].rxstatus;

		if (debug > 4)
			netdev_dbg(dev, " epic_rx() status was %8.8x.\n",
				   status);
		if (--rx_work_limit < 0)
			break;
		if (status & 0x2006) {
			if (debug > 2)
				netdev_dbg(dev, "epic_rx() error status was %8.8x.\n",
					   status);
			if (status & 0x2000) {
				netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %4.4x!\n",
					    status);
				dev->stats.rx_length_errors++;
			} else if (status & 0x0006)
				/* Rx Frame errors are counted in hardware. */
				dev->stats.rx_errors++;
		} else {
			/* Malloc up new buffer, compatible with net-2e. */
			/* Omit the four octet CRC from the length. */
			short pkt_len = (status >> 16) - 4;
			struct sk_buff *skb;

			if (pkt_len > PKT_BUF_SZ - 4) {
				netdev_err(dev, "Oversized Ethernet frame, status %x %d bytes.\n",
					   status, pkt_len);
				pkt_len = 1514;
			}
			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				dma_sync_single_for_cpu(&ep->pci_dev->dev,
							ep->rx_ring[entry].bufaddr,
							ep->rx_buf_sz,
							DMA_FROM_DEVICE);
				skb_copy_to_linear_data(skb, ep->rx_skbuff[entry]->data, pkt_len);
				skb_put(skb, pkt_len);
				dma_sync_single_for_device(&ep->pci_dev->dev,
							   ep->rx_ring[entry].bufaddr,
							   ep->rx_buf_sz,
							   DMA_FROM_DEVICE);
			} else {
				dma_unmap_single(&ep->pci_dev->dev,
						 ep->rx_ring[entry].bufaddr,
						 ep->rx_buf_sz,
						 DMA_FROM_DEVICE);
				skb_put(skb = ep->rx_skbuff[entry], pkt_len);
				ep->rx_skbuff[entry] = NULL;
			}
			skb->protocol = eth_type_trans(skb, dev);
			netif_receive_skb(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}
		work_done++;
		entry = (++ep->cur_rx) % RX_RING_SIZE;
	}

	/* Refill the Rx ring buffers. */
	for (; ep->cur_rx - ep->dirty_rx > 0; ep->dirty_rx++) {
		entry = ep->dirty_rx % RX_RING_SIZE;
		if (ep->rx_skbuff[entry] == NULL) {
			struct sk_buff *skb;
			skb = ep->rx_skbuff[entry] = netdev_alloc_skb(dev, ep->rx_buf_sz + 2);
			if (skb == NULL)
				break;
			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
			ep->rx_ring[entry].bufaddr = dma_map_single(&ep->pci_dev->dev,
								    skb->data,
								    ep->rx_buf_sz,
								    DMA_FROM_DEVICE);
			work_done++;
		}
		/* AV: shouldn't we add a barrier here? */
		ep->rx_ring[entry].rxstatus = DescOwn;
	}
	return work_done;
}
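
/* Editor's note: rx_copybreak trades a memcpy for DMA-buffer recycling.
 * With, say, rx_copybreak=200: a 120-byte frame is copied into a fresh
 * 122-byte skb (2 bytes reserved for IP alignment) and the original
 * mapped buffer stays in the ring, while a 1000-byte frame is unmapped
 * and handed up whole, forcing the refill loop above to allocate and
 * map a replacement. With the compiled-in default of 0, the
 * pkt_len < rx_copybreak test never fires and nothing is copied.
 */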

static void epic_rx_err(struct net_device *dev, struct epic_private *ep)
{
	void __iomem *ioaddr = ep->ioaddr;
	int status;

	status = er32(INTSTAT);

	if (status == EpicRemoved)
		return;
	if (status & RxOverflow)	/* Missed a Rx frame. */
		dev->stats.rx_errors++;
	if (status & (RxOverflow | RxFull))
		ew16(COMMAND, RxQueued);
}

static int epic_poll(struct napi_struct *napi, int budget)
{
	struct epic_private *ep = container_of(napi, struct epic_private, napi);
	struct net_device *dev = ep->mii.dev;
	void __iomem *ioaddr = ep->ioaddr;
	int work_done;

	epic_tx(dev, ep);

	work_done = epic_rx(dev, budget);

	epic_rx_err(dev, ep);

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ep->napi_lock, flags);

		ew32(INTSTAT, EpicNapiEvent);
		epic_napi_irq_on(dev, ep);
		spin_unlock_irqrestore(&ep->napi_lock, flags);
	}

	return work_done;
}
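
/* Editor's note: the NAPI budget contract above: if epic_poll() consumes
 * its whole budget (work_done == budget) it must NOT report completion;
 * the core keeps the instance scheduled and polls again. Only on a
 * partial quantum (e.g. budget 64, 10 frames pending -> work_done == 10)
 * does napi_complete_done() succeed and the device interrupt get
 * re-enabled via epic_napi_irq_on().
 */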

static int epic_close(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	struct pci_dev *pdev = ep->pci_dev;
	void __iomem *ioaddr = ep->ioaddr;
	struct sk_buff *skb;
	int i;

	netif_stop_queue(dev);
	napi_disable(&ep->napi);

	if (debug > 1)
		netdev_dbg(dev, "Shutting down ethercard, status was %2.2x.\n",
			   er32(INTSTAT));

	del_timer_sync(&ep->timer);

	epic_disable_int(dev, ep);

	free_irq(pdev->irq, dev);

	epic_pause(dev);

	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		skb = ep->rx_skbuff[i];
		ep->rx_skbuff[i] = NULL;
		ep->rx_ring[i].rxstatus = 0;		/* Not owned by Epic chip. */
		ep->rx_ring[i].buflength = 0;
		if (skb) {
			dma_unmap_single(&pdev->dev, ep->rx_ring[i].bufaddr,
					 ep->rx_buf_sz, DMA_FROM_DEVICE);
			dev_kfree_skb(skb);
		}
		ep->rx_ring[i].bufaddr = 0xBADF00D0;	/* An invalid address. */
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		skb = ep->tx_skbuff[i];
		ep->tx_skbuff[i] = NULL;
		if (!skb)
			continue;
		dma_unmap_single(&pdev->dev, ep->tx_ring[i].bufaddr, skb->len,
				 DMA_TO_DEVICE);
		dev_kfree_skb(skb);
	}

	/* Green! Leave the chip in low-power mode. */
	ew32(GENCTL, 0x0008);

	return 0;
}

static struct net_device_stats *epic_get_stats(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (netif_running(dev)) {
		struct net_device_stats *stats = &dev->stats;

		stats->rx_missed_errors += er8(MPCNT);
		stats->rx_frame_errors += er8(ALICNT);
		stats->rx_crc_errors += er8(CRCCNT);
	}

	return &dev->stats;
}

/* Set or clear the multicast filter for this adapter.
   The filter table is built in a local mc_filter[] and written to the
   chip's MC0..MC3 registers only when it actually changes; re-entry is
   non-deterministic but still leaves a coherent filter in the chip. */

static void set_rx_mode(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;
	unsigned char mc_filter[8];		/* Multicast hash filter */
	int i;

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		ew32(RxCtrl, 0x002c);
		/* Unconditionally log net taps. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
	} else if ((!netdev_mc_empty(dev)) || (dev->flags & IFF_ALLMULTI)) {
		/* There is apparently a chip bug, so the multicast filter
		   is never enabled. */
		/* Too many to filter perfectly -- accept all multicasts. */
		memset(mc_filter, 0xff, sizeof(mc_filter));
		ew32(RxCtrl, 0x000c);
	} else if (netdev_mc_empty(dev)) {
		ew32(RxCtrl, 0x0004);
		return;
	} else {			/* Never executed, for now. */
		struct netdev_hw_addr *ha;

		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			unsigned int bit_nr =
				ether_crc_le(ETH_ALEN, ha->addr) & 0x3f;
			/* Low 3 bits select the bit within the byte. */
			mc_filter[bit_nr >> 3] |= (1 << (bit_nr & 7));
		}
	}
	/* ToDo: perhaps we need to stop the Tx and Rx process here? */
	if (memcmp(mc_filter, ep->mc_filter, sizeof(mc_filter))) {
		for (i = 0; i < 4; i++)
			ew16(MC0 + i*4, ((u16 *)mc_filter)[i]);
		memcpy(ep->mc_filter, mc_filter, sizeof(mc_filter));
	}
}
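
/* Editor's note: a worked example of the (currently unreachable) hash
 * path above, using a hypothetical CRC result: if
 * ether_crc_le(ETH_ALEN, addr) & 0x3f came out as 0x2b (43), the address
 * would land in byte 43 >> 3 == 5 of mc_filter[], bit 43 & 7 == 3,
 * i.e. mc_filter[5] |= 0x08. The 64-bit table is then loaded into the
 * chip as four 16-bit writes to the MC0..MC3 registers.
 */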

static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct epic_private *np = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
}

static int netdev_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct epic_private *np = netdev_priv(dev);

	spin_lock_irq(&np->lock);
	mii_ethtool_get_link_ksettings(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return 0;
}

static int netdev_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct epic_private *np = netdev_priv(dev);
	int rc;

	spin_lock_irq(&np->lock);
	rc = mii_ethtool_set_link_ksettings(&np->mii, cmd);
	spin_unlock_irq(&np->lock);

	return rc;
}

static int netdev_nway_reset(struct net_device *dev)
{
	struct epic_private *np = netdev_priv(dev);
	return mii_nway_restart(&np->mii);
}

static u32 netdev_get_link(struct net_device *dev)
{
	struct epic_private *np = netdev_priv(dev);
	return mii_link_ok(&np->mii);
}

static u32 netdev_get_msglevel(struct net_device *dev)
{
	return debug;
}

static void netdev_set_msglevel(struct net_device *dev, u32 value)
{
	debug = value;
}

static int ethtool_begin(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (ep->ethtool_ops_nesting == U32_MAX)
		return -EBUSY;
	/* power-up, if interface is down */
	if (!ep->ethtool_ops_nesting++ && !netif_running(dev)) {
		ew32(GENCTL, 0x0200);
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	}
	return 0;
}

static void ethtool_complete(struct net_device *dev)
{
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	/* power-down, if interface is down */
	if (!--ep->ethtool_ops_nesting && !netif_running(dev)) {
		ew32(GENCTL, 0x0008);
		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
	}
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.nway_reset		= netdev_nway_reset,
	.get_link		= netdev_get_link,
	.get_msglevel		= netdev_get_msglevel,
	.set_msglevel		= netdev_set_msglevel,
	.begin			= ethtool_begin,
	.complete		= ethtool_complete,
	.get_link_ksettings	= netdev_get_link_ksettings,
	.set_link_ksettings	= netdev_set_link_ksettings,
};
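
/* Editor's note: these hooks are reached through the ethtool core, e.g.
 * (assuming the interface is named eth0):
 *
 *	ethtool -i eth0             -> netdev_get_drvinfo ("driver: epic100")
 *	ethtool eth0                -> begin, get_link_ksettings, complete
 *	ethtool -s eth0 autoneg on  -> begin, set_link_ksettings, complete
 *
 * The begin/complete pair powers the chip up around any access made while
 * the interface is administratively down, with ethtool_ops_nesting
 * preventing an unbalanced power-down when calls nest.
 */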

static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct epic_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	struct mii_ioctl_data *data = if_mii(rq);
	int rc;

	/* power-up, if interface is down */
	if (!netif_running(dev)) {
		ew32(GENCTL, 0x0200);
		ew32(NVCTL, (er32(NVCTL) & ~0x003c) | 0x4800);
	}

	/* all non-ethtool ioctls (the SIOC[GS]MIIxxx ioctls) */
	spin_lock_irq(&np->lock);
	rc = generic_mii_ioctl(&np->mii, data, cmd, NULL);
	spin_unlock_irq(&np->lock);

	/* power-down, if interface is down */
	if (!netif_running(dev)) {
		ew32(GENCTL, 0x0008);
		ew32(NVCTL, (er32(NVCTL) & ~0x483c) | 0x0000);
	}
	return rc;
}
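
/* Editor's note: a minimal userspace sketch of driving the MII ioctls
 * handled above (hypothetical interface name; error handling elided).
 * SIOCGMIIPHY returns the PHY address, SIOCGMIIREG reads a register:
 *
 *	struct ifreq ifr = {};
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ioctl(fd, SIOCGMIIPHY, &ifr);		// fills mii->phy_id
 *	mii->reg_num = 1;			// MII_BMSR, basic status
 *	ioctl(fd, SIOCGMIIREG, &ifr);		// result in mii->val_out
 */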


static void epic_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct epic_private *ep = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, ep->tx_ring,
			  ep->tx_ring_dma);
	dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, ep->rx_ring,
			  ep->rx_ring_dma);
	pci_iounmap(pdev, ep->ioaddr);
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	/* pci_power_off(pdev, -1); */
}

static int __maybe_unused epic_suspend(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);
	struct epic_private *ep = netdev_priv(dev);
	void __iomem *ioaddr = ep->ioaddr;

	if (!netif_running(dev))
		return 0;
	epic_pause(dev);
	/* Put the chip into low-power mode. */
	ew32(GENCTL, 0x0008);
	/* pci_power_off(pdev, -1); */
	return 0;
}


static int __maybe_unused epic_resume(struct device *dev_d)
{
	struct net_device *dev = dev_get_drvdata(dev_d);

	if (!netif_running(dev))
		return 0;
	epic_restart(dev);
	/* pci_power_on(pdev); */
	return 0;
}

static SIMPLE_DEV_PM_OPS(epic_pm_ops, epic_suspend, epic_resume);

static struct pci_driver epic_driver = {
	.name		= DRV_NAME,
	.id_table	= epic_pci_tbl,
	.probe		= epic_init_one,
	.remove		= epic_remove_one,
	.driver.pm	= &epic_pm_ops,
};


static int __init epic_init(void)
{
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
	pr_info("%s%s\n", version, version2);
#endif

	return pci_register_driver(&epic_driver);
}


static void __exit epic_cleanup(void)
{
	pci_unregister_driver(&epic_driver);
}


module_init(epic_init);
module_exit(epic_cleanup);
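
/* Editor's note: typical module usage, assuming the parameters declared
 * near the top of this file (debug, rx_copybreak, options[],
 * full_duplex[]) are exported with module_param elsewhere in the driver:
 *
 *	modprobe epic100 debug=3 rx_copybreak=200 full_duplex=1,1
 *
 * loads the driver with verbose logging, copy-break at 200 bytes, and
 * forced full duplex on the first two adapters.
 */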