/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux. */
/*
	Written 1996-1999 by Donald Becker.

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
	Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
	and the EtherLink XL 3c900 and 3c905 cards.

	Problem reports and questions should be directed to
	vortex@scyld.com

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

*/

/*
 * FIXME: This driver _could_ support MTU changing, but doesn't.  See Don's hamachi.c implementation
 * as well as other drivers
 *
 * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k
 * due to dead code elimination.  There will be some performance benefits from this due to
 * elimination of all the tests and reduced cache footprint.
 */

31*4882a593Smuzhiyun 
#define DRV_NAME	"3c59x"



/* A few values that may be tweaked. */
/* Keep the ring sizes a power of two for efficiency. */
#define TX_RING_SIZE	16
#define RX_RING_SIZE	32
#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/

/* "Knobs" that adjust features and parameters. */
/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1512 effectively disables this feature. */
#ifndef __arm__
static int rx_copybreak = 200;
#else
/* ARM systems perform better by disregarding the bus-master
   transfer capability of these cards. -- rmk */
static int rx_copybreak = 1513;
#endif
/* Allow setting MTU to a larger size, bypassing the normal ethernet setup. */
static const int mtu = 1500;
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 32;
/* Tx timeout interval (millisecs) */
static int watchdog = 5000;

/* Allow aggregation of Tx interrupts.  Saves CPU load at the cost
 * of possible Tx stalls if the system is blocking interrupts
 * somewhere else.  Undefine this to disable.
 */
#define tx_interrupt_mitigation 1

/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
#define vortex_debug debug
#ifdef VORTEX_DEBUG
static int vortex_debug = VORTEX_DEBUG;
#else
static int vortex_debug = 1;
#endif
72*4882a593Smuzhiyun 
73*4882a593Smuzhiyun #include <linux/module.h>
74*4882a593Smuzhiyun #include <linux/kernel.h>
75*4882a593Smuzhiyun #include <linux/string.h>
76*4882a593Smuzhiyun #include <linux/timer.h>
77*4882a593Smuzhiyun #include <linux/errno.h>
78*4882a593Smuzhiyun #include <linux/in.h>
79*4882a593Smuzhiyun #include <linux/ioport.h>
80*4882a593Smuzhiyun #include <linux/interrupt.h>
81*4882a593Smuzhiyun #include <linux/pci.h>
82*4882a593Smuzhiyun #include <linux/mii.h>
83*4882a593Smuzhiyun #include <linux/init.h>
84*4882a593Smuzhiyun #include <linux/netdevice.h>
85*4882a593Smuzhiyun #include <linux/etherdevice.h>
86*4882a593Smuzhiyun #include <linux/skbuff.h>
87*4882a593Smuzhiyun #include <linux/ethtool.h>
88*4882a593Smuzhiyun #include <linux/highmem.h>
89*4882a593Smuzhiyun #include <linux/eisa.h>
90*4882a593Smuzhiyun #include <linux/bitops.h>
91*4882a593Smuzhiyun #include <linux/jiffies.h>
92*4882a593Smuzhiyun #include <linux/gfp.h>
93*4882a593Smuzhiyun #include <asm/irq.h>			/* For nr_irqs only. */
94*4882a593Smuzhiyun #include <asm/io.h>
95*4882a593Smuzhiyun #include <linux/uaccess.h>
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun /* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
98*4882a593Smuzhiyun    This is only in the support-all-kernels source code. */
99*4882a593Smuzhiyun 
100*4882a593Smuzhiyun #define RUN_AT(x) (jiffies + (x))
101*4882a593Smuzhiyun 
102*4882a593Smuzhiyun #include <linux/delay.h>
103*4882a593Smuzhiyun 
104*4882a593Smuzhiyun 
105*4882a593Smuzhiyun static const char version[] =
106*4882a593Smuzhiyun 	DRV_NAME ": Donald Becker and others.\n";
107*4882a593Smuzhiyun 
108*4882a593Smuzhiyun MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
109*4882a593Smuzhiyun MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver ");
110*4882a593Smuzhiyun MODULE_LICENSE("GPL");
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun 
/* Operational parameter that usually are not changed. */

/* The Vortex size is twice that of the original EtherLinkIII series: the
   runtime register window, window 1, is now always mapped in.
   The Boomerang size is twice as large as the Vortex -- it has additional
   bus master control registers. */
#define VORTEX_TOTAL_SIZE 0x20
#define BOOMERANG_TOTAL_SIZE 0x40

/* Set iff a MII transceiver on any interface requires mdio preamble.
   This only set with the original DP83840 on older 3c905 boards, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required;

/* Prefix for driver log messages. */
#define PFX DRV_NAME ": "
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 
/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the 3Com FastEtherLink and FastEtherLink
XL, 3Com's PCI to 10/100baseT adapters.  It also works with the 10Mbs
versions of the FastEtherLink cards.  The supported product IDs are
  3c590, 3c592, 3c595, 3c597, 3c900, 3c905

The related ISA 3c515 is supported with a separate driver, 3c515.c, included
with the kernel source or available from
    cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html

II. Board-specific settings

PCI bus devices are configured by the system at boot time, so no jumpers
need to be set on the board.  The system BIOS should be set to assign the
PCI INTA signal to an otherwise unused system IRQ line.

The EEPROM settings for media type and forced-full-duplex are observed.
The EEPROM media type should be left at the default "autoselect" unless using
10base2 or AUI connections which cannot be reliably detected.

III. Driver operation

The 3c59x series use an interface that's very similar to the previous 3c5x9
series.  The primary interface is two programmed-I/O FIFOs, with an
alternate single-contiguous-region bus-master transfer (see next).

The 3c900 "Boomerang" series uses a full-bus-master interface with separate
lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
DEC Tulip and Intel Speedo3.  The first chip version retains a compatible
programmed-I/O interface that has been removed in 'B' and subsequent board
revisions.

One extension that is advertised in a very large font is that the adapters
are capable of being bus masters.  On the Vortex chip this capability was
only for a single contiguous region making it far less useful than the full
bus master capability.  There is a significant performance impact of taking
an extra interrupt or polling for the completion of each transfer, as well
as difficulty sharing the single transfer engine between the transmit and
receive threads.  Using DMA transfers is a win only with large blocks or
with the flawed versions of the Intel Orion motherboard PCI controller.

The Boomerang chip's full-bus-master interface is useful, and has the
currently-unused advantages over other similar chips that queued transmit
packets may be reordered and receive buffer groups are associated with a
single frame.

With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
Rather than a fixed intermediate receive buffer, this scheme allocates
full-sized skbuffs as receive buffers.  The value RX_COPYBREAK is used as
the copying breakpoint: it is chosen to trade-off the memory wasted by
passing the full-sized skbuff to the queue layer for all frames vs. the
copying cost of copying a frame to a correctly-sized skbuff.

IIIC. Synchronization
The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

IV. Notes

Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
3c590, 3c595, and 3c900 boards.
The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
the EISA version is called "Demon".  According to Terry these names come
from rides at the local amusement park.

The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
This driver only supports ethernet packets because of the skbuff allocation
limit of 4K.
*/
206*4882a593Smuzhiyun 
/* This table drives the PCI probe routines.  It's mostly boilerplate in all
   of the drivers, and will likely be provided by some future kernel.
*/
enum pci_flags_bit {
	PCI_USES_MASTER=4,	/* Device needs PCI bus mastering enabled. */
};
213*4882a593Smuzhiyun 
/* Per-chip capability/quirk flags kept in vortex_chip_info.drv_flags. */
enum {	IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
	EEPROM_8BIT=0x10,	/* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */
	HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100,
	INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800,
	EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000, WNO_XCVR_PWR=0x4000,
	EXTRA_PREAMBLE=0x8000, EEPROM_RESET=0x10000, };
220*4882a593Smuzhiyun 
/* Index into vortex_info_tbl[] (and driver_data in vortex_pci_tbl[]);
   order here MUST match both tables below. */
enum vortex_chips {
	CH_3C590 = 0,
	CH_3C592,
	CH_3C597,
	CH_3C595_1,
	CH_3C595_2,

	CH_3C595_3,
	CH_3C900_1,
	CH_3C900_2,
	CH_3C900_3,
	CH_3C900_4,

	CH_3C900_5,
	CH_3C900B_FL,
	CH_3C905_1,
	CH_3C905_2,
	CH_3C905B_TX,
	CH_3C905B_1,

	CH_3C905B_2,
	CH_3C905B_FX,
	CH_3C905C,
	CH_3C9202,
	CH_3C980,
	CH_3C9805,

	CH_3CSOHO100_TX,
	CH_3C555,
	CH_3C556,
	CH_3C556B,
	CH_3C575,

	CH_3C575_1,
	CH_3CCFE575,
	CH_3CCFE575CT,
	CH_3CCFE656,
	CH_3CCFEM656,

	CH_3CCFEM656_1,
	CH_3C450,
	CH_3C920,
	CH_3C982A,
	CH_3C982B,

	CH_905BT4,
	CH_920B_EMB_WNM,
};
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun 
271*4882a593Smuzhiyun /* note: this array directly indexed by above enums, and MUST
272*4882a593Smuzhiyun  * be kept in sync with both the enums above, and the PCI device
273*4882a593Smuzhiyun  * table below
274*4882a593Smuzhiyun  */
275*4882a593Smuzhiyun static struct vortex_chip_info {
276*4882a593Smuzhiyun 	const char *name;
277*4882a593Smuzhiyun 	int flags;
278*4882a593Smuzhiyun 	int drv_flags;
279*4882a593Smuzhiyun 	int io_size;
280*4882a593Smuzhiyun } vortex_info_tbl[] = {
281*4882a593Smuzhiyun 	{"3c590 Vortex 10Mbps",
282*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_VORTEX, 32, },
283*4882a593Smuzhiyun 	{"3c592 EISA 10Mbps Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
284*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_VORTEX, 32, },
285*4882a593Smuzhiyun 	{"3c597 EISA Fast Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
286*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_VORTEX, 32, },
287*4882a593Smuzhiyun 	{"3c595 Vortex 100baseTx",
288*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_VORTEX, 32, },
289*4882a593Smuzhiyun 	{"3c595 Vortex 100baseT4",
290*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_VORTEX, 32, },
291*4882a593Smuzhiyun 
292*4882a593Smuzhiyun 	{"3c595 Vortex 100base-MII",
293*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_VORTEX, 32, },
294*4882a593Smuzhiyun 	{"3c900 Boomerang 10baseT",
295*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
296*4882a593Smuzhiyun 	{"3c900 Boomerang 10Mbps Combo",
297*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_BOOMERANG|EEPROM_RESET, 64, },
298*4882a593Smuzhiyun 	{"3c900 Cyclone 10Mbps TPO",						/* AKPM: from Don's 0.99M */
299*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
300*4882a593Smuzhiyun 	{"3c900 Cyclone 10Mbps Combo",
301*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 	{"3c900 Cyclone 10Mbps TPC",						/* AKPM: from Don's 0.99M */
304*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
305*4882a593Smuzhiyun 	{"3c900B-FL Cyclone 10base-FL",
306*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
307*4882a593Smuzhiyun 	{"3c905 Boomerang 100baseTx",
308*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
309*4882a593Smuzhiyun 	{"3c905 Boomerang 100baseT4",
310*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_RESET, 64, },
311*4882a593Smuzhiyun 	{"3C905B-TX Fast Etherlink XL PCI",
312*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
313*4882a593Smuzhiyun 	{"3c905B Cyclone 100baseTx",
314*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
315*4882a593Smuzhiyun 
316*4882a593Smuzhiyun 	{"3c905B Cyclone 10/100/BNC",
317*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
318*4882a593Smuzhiyun 	{"3c905B-FX Cyclone 100baseFx",
319*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
320*4882a593Smuzhiyun 	{"3c905C Tornado",
321*4882a593Smuzhiyun 	PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
322*4882a593Smuzhiyun 	{"3c920B-EMB-WNM (ATI Radeon 9100 IGP)",
323*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_TORNADO|HAS_MII|HAS_HWCKSM, 128, },
324*4882a593Smuzhiyun 	{"3c980 Cyclone",
325*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
326*4882a593Smuzhiyun 
327*4882a593Smuzhiyun 	{"3c980C Python-T",
328*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
329*4882a593Smuzhiyun 	{"3cSOHO100-TX Hurricane",
330*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
331*4882a593Smuzhiyun 	{"3c555 Laptop Hurricane",
332*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
333*4882a593Smuzhiyun 	{"3c556 Laptop Tornado",
334*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
335*4882a593Smuzhiyun 									HAS_HWCKSM, 128, },
336*4882a593Smuzhiyun 	{"3c556B Laptop Hurricane",
337*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
338*4882a593Smuzhiyun 	                                WNO_XCVR_PWR|HAS_HWCKSM, 128, },
339*4882a593Smuzhiyun 
340*4882a593Smuzhiyun 	{"3c575 [Megahertz] 10/100 LAN 	CardBus",
341*4882a593Smuzhiyun 	PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
342*4882a593Smuzhiyun 	{"3c575 Boomerang CardBus",
343*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
344*4882a593Smuzhiyun 	{"3CCFE575BT Cyclone CardBus",
345*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
346*4882a593Smuzhiyun 									INVERT_LED_PWR|HAS_HWCKSM, 128, },
347*4882a593Smuzhiyun 	{"3CCFE575CT Tornado CardBus",
348*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
349*4882a593Smuzhiyun 									MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
350*4882a593Smuzhiyun 	{"3CCFE656 Cyclone CardBus",
351*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
352*4882a593Smuzhiyun 									INVERT_LED_PWR|HAS_HWCKSM, 128, },
353*4882a593Smuzhiyun 
354*4882a593Smuzhiyun 	{"3CCFEM656B Cyclone+Winmodem CardBus",
355*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
356*4882a593Smuzhiyun 									INVERT_LED_PWR|HAS_HWCKSM, 128, },
357*4882a593Smuzhiyun 	{"3CXFEM656C Tornado+Winmodem CardBus",			/* From pcmcia-cs-3.1.5 */
358*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
359*4882a593Smuzhiyun 									MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
360*4882a593Smuzhiyun 	{"3c450 HomePNA Tornado",						/* AKPM: from Don's 0.99Q */
361*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
362*4882a593Smuzhiyun 	{"3c920 Tornado",
363*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
364*4882a593Smuzhiyun 	{"3c982 Hydra Dual Port A",
365*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun 	{"3c982 Hydra Dual Port B",
368*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_TORNADO|HAS_HWCKSM|HAS_NWAY, 128, },
369*4882a593Smuzhiyun 	{"3c905B-T4",
370*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM|EXTRA_PREAMBLE, 128, },
371*4882a593Smuzhiyun 	{"3c920B-EMB-WNM Tornado",
372*4882a593Smuzhiyun 	 PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
373*4882a593Smuzhiyun 
374*4882a593Smuzhiyun 	{NULL,}, /* NULL terminated list. */
375*4882a593Smuzhiyun };
376*4882a593Smuzhiyun 
377*4882a593Smuzhiyun 
378*4882a593Smuzhiyun static const struct pci_device_id vortex_pci_tbl[] = {
379*4882a593Smuzhiyun 	{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
380*4882a593Smuzhiyun 	{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
381*4882a593Smuzhiyun 	{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
382*4882a593Smuzhiyun 	{ 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 },
383*4882a593Smuzhiyun 	{ 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 },
384*4882a593Smuzhiyun 
385*4882a593Smuzhiyun 	{ 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 },
386*4882a593Smuzhiyun 	{ 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 },
387*4882a593Smuzhiyun 	{ 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 },
388*4882a593Smuzhiyun 	{ 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 },
389*4882a593Smuzhiyun 	{ 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 },
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 	{ 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 },
392*4882a593Smuzhiyun 	{ 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
393*4882a593Smuzhiyun 	{ 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
394*4882a593Smuzhiyun 	{ 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
395*4882a593Smuzhiyun 	{ 0x10B7, 0x9054, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_TX },
396*4882a593Smuzhiyun 	{ 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
397*4882a593Smuzhiyun 
398*4882a593Smuzhiyun 	{ 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
399*4882a593Smuzhiyun 	{ 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX },
400*4882a593Smuzhiyun 	{ 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C },
401*4882a593Smuzhiyun 	{ 0x10B7, 0x9202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9202 },
402*4882a593Smuzhiyun 	{ 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 },
403*4882a593Smuzhiyun 	{ 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 },
404*4882a593Smuzhiyun 
405*4882a593Smuzhiyun 	{ 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX },
406*4882a593Smuzhiyun 	{ 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 },
407*4882a593Smuzhiyun 	{ 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 },
408*4882a593Smuzhiyun 	{ 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B },
409*4882a593Smuzhiyun 	{ 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 },
410*4882a593Smuzhiyun 
411*4882a593Smuzhiyun 	{ 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 },
412*4882a593Smuzhiyun 	{ 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 },
413*4882a593Smuzhiyun 	{ 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT },
414*4882a593Smuzhiyun 	{ 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 },
415*4882a593Smuzhiyun 	{ 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },
416*4882a593Smuzhiyun 
417*4882a593Smuzhiyun 	{ 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
418*4882a593Smuzhiyun 	{ 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
419*4882a593Smuzhiyun 	{ 0x10B7, 0x9201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C920 },
420*4882a593Smuzhiyun 	{ 0x10B7, 0x1201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982A },
421*4882a593Smuzhiyun 	{ 0x10B7, 0x1202, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C982B },
422*4882a593Smuzhiyun 
423*4882a593Smuzhiyun 	{ 0x10B7, 0x9056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_905BT4 },
424*4882a593Smuzhiyun 	{ 0x10B7, 0x9210, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_920B_EMB_WNM },
425*4882a593Smuzhiyun 
426*4882a593Smuzhiyun 	{0,}						/* 0 terminated list. */
427*4882a593Smuzhiyun };
428*4882a593Smuzhiyun MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
429*4882a593Smuzhiyun 
430*4882a593Smuzhiyun 
/* Operational definitions.
   These are not used by other compilation units and thus are not
   exported in a ".h" file.

   First the windows.  There are eight register windows, with the command
   and status registers available in each.
   */
#define EL3_CMD 0x0e
#define EL3_STATUS 0x0e

/* The top five bits written to EL3_CMD are a command, the lower
   11 bits are the parameter, if applicable.
   Note that 11 parameters bits was fine for ethernet, but the new chip
   can handle FDDI length frames (~4500 octets) and now parameters count
   32-bit 'Dwords' rather than octets. */

enum vortex_cmd {
	TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
	RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
	UpStall = 6<<11, UpUnstall = (6<<11)+1,
	DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
	RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
	FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
	SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
	SetTxThreshold = 18<<11, SetTxStart = 19<<11,
	StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
	StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
458*4882a593Smuzhiyun 
/* The SetRxFilter command accepts the following classes: */
enum RxFilter {
	RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };

/* Bits in the general status register. */
enum vortex_status {
	IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
	TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
	IntReq = 0x0040, StatsFull = 0x0080,
	DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
	DMAInProgress = 1<<11,			/* DMA controller is still busy.*/
	CmdInProgress = 1<<12,			/* EL3_CMD is still busy.*/
};
472*4882a593Smuzhiyun 
/* Register window 1 offsets, the window used in normal operation.
   On the Vortex this window is always mapped at offsets 0x10-0x1f. */
enum Window1 {
	TX_FIFO = 0x10,  RX_FIFO = 0x10,  RxErrors = 0x14,
	RxStatus = 0x18,  Timer=0x1A, TxStatus = 0x1B,
	TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
};
enum Window0 {
	Wn0EepromCmd = 10,		/* Window 0: EEPROM command register. */
	Wn0EepromData = 12,		/* Window 0: EEPROM results register. */
	IntrStatus=0x0E,		/* Valid in all windows. */
};
enum Win0_EEPROM_bits {
	EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
	EEPROM_EWENB = 0x30,		/* Enable erasing/writing for 10 msec. */
	EEPROM_EWDIS = 0x00,		/* Disable EWENB before 10 msec timeout. */
};
/* EEPROM locations. */
enum eeprom_offset {
	PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
	EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
	NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
	DriverTune=13, Checksum=15};

enum Window2 {			/* Window 2. */
	Wn2_ResetOptions=12,
};
enum Window3 {			/* Window 3: MAC/config bits. */
	Wn3_Config=0, Wn3_MaxPktSize=4, Wn3_MAC_Ctrl=6, Wn3_Options=8,
};
503*4882a593Smuzhiyun 
/* Extract a bitfield of width 'bitcount' starting at bit 'offset'. */
#define BFEXT(value, offset, bitcount)  \
    ((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))

/* Insert 'rhs' into the corresponding bitfield of 'lhs'. */
#define BFINS(lhs, rhs, offset, bitcount)					\
	(((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) |	\
	(((rhs) & ((1 << (bitcount)) - 1)) << (offset)))

/* Decoders for the EEPROM/config dword fields. */
#define RAM_SIZE(v)		BFEXT(v, 0, 3)
#define RAM_WIDTH(v)	BFEXT(v, 3, 1)
#define RAM_SPEED(v)	BFEXT(v, 4, 2)
#define ROM_SIZE(v)		BFEXT(v, 6, 2)
#define RAM_SPLIT(v)	BFEXT(v, 16, 2)
#define XCVR(v)			BFEXT(v, 20, 4)
#define AUTOSELECT(v)	BFEXT(v, 24, 1)
518*4882a593Smuzhiyun 
enum Window4 {		/* Window 4: Xcvr/media bits. */
	Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
};
enum Win4_Media_bits {
	Media_SQE = 0x0008,		/* Enable SQE error counting for AUI. */
	Media_10TP = 0x00C0,	/* Enable link beat and jabber for 10baseT. */
	Media_Lnk = 0x0080,		/* Enable just link beat for 100TX/100FX. */
	Media_LnkBeat = 0x0800,
};
enum Window7 {					/* Window 7: Bus Master control. */
	Wn7_MasterAddr = 0, Wn7_VlanEtherType=4, Wn7_MasterLen = 6,
	Wn7_MasterStatus = 12,
};
/* Boomerang bus master control registers. */
enum MasterCtrl {
	PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
	TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
};
537*4882a593Smuzhiyun 
538*4882a593Smuzhiyun /* The Rx and Tx descriptor lists.
539*4882a593Smuzhiyun    Caution Alpha hackers: these types are 32 bits!  Note also the 8 byte
540*4882a593Smuzhiyun    alignment contraint on tx_ring[] and rx_ring[]. */
541*4882a593Smuzhiyun #define LAST_FRAG 	0x80000000			/* Last Addr/Len pair in descriptor. */
542*4882a593Smuzhiyun #define DN_COMPLETE	0x00010000			/* This packet has been downloaded */
543*4882a593Smuzhiyun struct boom_rx_desc {
544*4882a593Smuzhiyun 	__le32 next;					/* Last entry points to 0.   */
545*4882a593Smuzhiyun 	__le32 status;
546*4882a593Smuzhiyun 	__le32 addr;					/* Up to 63 addr/len pairs possible. */
547*4882a593Smuzhiyun 	__le32 length;					/* Set LAST_FRAG to indicate last pair. */
548*4882a593Smuzhiyun };
549*4882a593Smuzhiyun /* Values for the Rx status entry. */
550*4882a593Smuzhiyun enum rx_desc_status {
551*4882a593Smuzhiyun 	RxDComplete=0x00008000, RxDError=0x4000,
552*4882a593Smuzhiyun 	/* See boomerang_rx() for actual error bits */
553*4882a593Smuzhiyun 	IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
554*4882a593Smuzhiyun 	IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
555*4882a593Smuzhiyun };
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun #ifdef MAX_SKB_FRAGS
558*4882a593Smuzhiyun #define DO_ZEROCOPY 1
559*4882a593Smuzhiyun #else
560*4882a593Smuzhiyun #define DO_ZEROCOPY 0
561*4882a593Smuzhiyun #endif
562*4882a593Smuzhiyun 
563*4882a593Smuzhiyun struct boom_tx_desc {
564*4882a593Smuzhiyun 	__le32 next;					/* Last entry points to 0.   */
565*4882a593Smuzhiyun 	__le32 status;					/* bits 0:12 length, others see below.  */
566*4882a593Smuzhiyun #if DO_ZEROCOPY
567*4882a593Smuzhiyun 	struct {
568*4882a593Smuzhiyun 		__le32 addr;
569*4882a593Smuzhiyun 		__le32 length;
570*4882a593Smuzhiyun 	} frag[1+MAX_SKB_FRAGS];
571*4882a593Smuzhiyun #else
572*4882a593Smuzhiyun 		__le32 addr;
573*4882a593Smuzhiyun 		__le32 length;
574*4882a593Smuzhiyun #endif
575*4882a593Smuzhiyun };
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun /* Values for the Tx status entry. */
578*4882a593Smuzhiyun enum tx_desc_status {
579*4882a593Smuzhiyun 	CRCDisable=0x2000, TxDComplete=0x8000,
580*4882a593Smuzhiyun 	AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
581*4882a593Smuzhiyun 	TxIntrUploaded=0x80000000,		/* IRQ when in FIFO, but maybe not sent. */
582*4882a593Smuzhiyun };
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun /* Chip features we care about in vp->capabilities, read from the EEPROM. */
585*4882a593Smuzhiyun enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
586*4882a593Smuzhiyun 
587*4882a593Smuzhiyun struct vortex_extra_stats {
588*4882a593Smuzhiyun 	unsigned long tx_deferred;
589*4882a593Smuzhiyun 	unsigned long tx_max_collisions;
590*4882a593Smuzhiyun 	unsigned long tx_multiple_collisions;
591*4882a593Smuzhiyun 	unsigned long tx_single_collisions;
592*4882a593Smuzhiyun 	unsigned long rx_bad_ssd;
593*4882a593Smuzhiyun };
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun struct vortex_private {
596*4882a593Smuzhiyun 	/* The Rx and Tx rings should be quad-word-aligned. */
597*4882a593Smuzhiyun 	struct boom_rx_desc* rx_ring;
598*4882a593Smuzhiyun 	struct boom_tx_desc* tx_ring;
599*4882a593Smuzhiyun 	dma_addr_t rx_ring_dma;
600*4882a593Smuzhiyun 	dma_addr_t tx_ring_dma;
601*4882a593Smuzhiyun 	/* The addresses of transmit- and receive-in-place skbuffs. */
602*4882a593Smuzhiyun 	struct sk_buff* rx_skbuff[RX_RING_SIZE];
603*4882a593Smuzhiyun 	struct sk_buff* tx_skbuff[TX_RING_SIZE];
604*4882a593Smuzhiyun 	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
605*4882a593Smuzhiyun 	unsigned int dirty_tx;	/* The ring entries to be free()ed. */
606*4882a593Smuzhiyun 	struct vortex_extra_stats xstats;	/* NIC-specific extra stats */
607*4882a593Smuzhiyun 	struct sk_buff *tx_skb;				/* Packet being eaten by bus master ctrl.  */
608*4882a593Smuzhiyun 	dma_addr_t tx_skb_dma;				/* Allocated DMA address for bus master ctrl DMA.   */
609*4882a593Smuzhiyun 
610*4882a593Smuzhiyun 	/* PCI configuration space information. */
611*4882a593Smuzhiyun 	struct device *gendev;
612*4882a593Smuzhiyun 	void __iomem *ioaddr;			/* IO address space */
613*4882a593Smuzhiyun 	void __iomem *cb_fn_base;		/* CardBus function status addr space. */
614*4882a593Smuzhiyun 
615*4882a593Smuzhiyun 	/* Some values here only for performance evaluation and path-coverage */
616*4882a593Smuzhiyun 	int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
617*4882a593Smuzhiyun 	int card_idx;
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun 	/* The remainder are related to chip state, mostly media selection. */
620*4882a593Smuzhiyun 	struct timer_list timer;			/* Media selection timer. */
621*4882a593Smuzhiyun 	int options;						/* User-settable misc. driver options. */
622*4882a593Smuzhiyun 	unsigned int media_override:4, 		/* Passed-in media type. */
623*4882a593Smuzhiyun 		default_media:4,				/* Read from the EEPROM/Wn3_Config. */
624*4882a593Smuzhiyun 		full_duplex:1, autoselect:1,
625*4882a593Smuzhiyun 		bus_master:1,					/* Vortex can only do a fragment bus-m. */
626*4882a593Smuzhiyun 		full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang  */
627*4882a593Smuzhiyun 		flow_ctrl:1,					/* Use 802.3x flow control (PAUSE only) */
628*4882a593Smuzhiyun 		partner_flow_ctrl:1,			/* Partner supports flow control */
629*4882a593Smuzhiyun 		has_nway:1,
630*4882a593Smuzhiyun 		enable_wol:1,					/* Wake-on-LAN is enabled */
631*4882a593Smuzhiyun 		pm_state_valid:1,				/* pci_dev->saved_config_space has sane contents */
632*4882a593Smuzhiyun 		open:1,
633*4882a593Smuzhiyun 		medialock:1,
634*4882a593Smuzhiyun 		large_frames:1,			/* accept large frames */
635*4882a593Smuzhiyun 		handling_irq:1;			/* private in_irq indicator */
636*4882a593Smuzhiyun 	/* {get|set}_wol operations are already serialized by rtnl.
637*4882a593Smuzhiyun 	 * no additional locking is required for the enable_wol and acpi_set_WOL()
638*4882a593Smuzhiyun 	 */
639*4882a593Smuzhiyun 	int drv_flags;
640*4882a593Smuzhiyun 	u16 status_enable;
641*4882a593Smuzhiyun 	u16 intr_enable;
642*4882a593Smuzhiyun 	u16 available_media;				/* From Wn3_Options. */
643*4882a593Smuzhiyun 	u16 capabilities, info1, info2;		/* Various, from EEPROM. */
644*4882a593Smuzhiyun 	u16 advertising;					/* NWay media advertisement */
645*4882a593Smuzhiyun 	unsigned char phys[2];				/* MII device addresses. */
646*4882a593Smuzhiyun 	u16 deferred;						/* Resend these interrupts when we
647*4882a593Smuzhiyun 										 * bale from the ISR */
648*4882a593Smuzhiyun 	u16 io_size;						/* Size of PCI region (for release_region) */
649*4882a593Smuzhiyun 
650*4882a593Smuzhiyun 	/* Serialises access to hardware other than MII and variables below.
651*4882a593Smuzhiyun 	 * The lock hierarchy is rtnl_lock > {lock, mii_lock} > window_lock. */
652*4882a593Smuzhiyun 	spinlock_t lock;
653*4882a593Smuzhiyun 
654*4882a593Smuzhiyun 	spinlock_t mii_lock;		/* Serialises access to MII */
655*4882a593Smuzhiyun 	struct mii_if_info mii;		/* MII lib hooks/info */
656*4882a593Smuzhiyun 	spinlock_t window_lock;		/* Serialises access to windowed regs */
657*4882a593Smuzhiyun 	int window;			/* Register window */
658*4882a593Smuzhiyun };
659*4882a593Smuzhiyun 
window_set(struct vortex_private * vp,int window)660*4882a593Smuzhiyun static void window_set(struct vortex_private *vp, int window)
661*4882a593Smuzhiyun {
662*4882a593Smuzhiyun 	if (window != vp->window) {
663*4882a593Smuzhiyun 		iowrite16(SelectWindow + window, vp->ioaddr + EL3_CMD);
664*4882a593Smuzhiyun 		vp->window = window;
665*4882a593Smuzhiyun 	}
666*4882a593Smuzhiyun }
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun #define DEFINE_WINDOW_IO(size)						\
669*4882a593Smuzhiyun static u ## size							\
670*4882a593Smuzhiyun window_read ## size(struct vortex_private *vp, int window, int addr)	\
671*4882a593Smuzhiyun {									\
672*4882a593Smuzhiyun 	unsigned long flags;						\
673*4882a593Smuzhiyun 	u ## size ret;							\
674*4882a593Smuzhiyun 	spin_lock_irqsave(&vp->window_lock, flags);			\
675*4882a593Smuzhiyun 	window_set(vp, window);						\
676*4882a593Smuzhiyun 	ret = ioread ## size(vp->ioaddr + addr);			\
677*4882a593Smuzhiyun 	spin_unlock_irqrestore(&vp->window_lock, flags);		\
678*4882a593Smuzhiyun 	return ret;							\
679*4882a593Smuzhiyun }									\
680*4882a593Smuzhiyun static void								\
681*4882a593Smuzhiyun window_write ## size(struct vortex_private *vp, u ## size value,	\
682*4882a593Smuzhiyun 		     int window, int addr)				\
683*4882a593Smuzhiyun {									\
684*4882a593Smuzhiyun 	unsigned long flags;						\
685*4882a593Smuzhiyun 	spin_lock_irqsave(&vp->window_lock, flags);			\
686*4882a593Smuzhiyun 	window_set(vp, window);						\
687*4882a593Smuzhiyun 	iowrite ## size(value, vp->ioaddr + addr);			\
688*4882a593Smuzhiyun 	spin_unlock_irqrestore(&vp->window_lock, flags);		\
689*4882a593Smuzhiyun }
690*4882a593Smuzhiyun DEFINE_WINDOW_IO(8)
691*4882a593Smuzhiyun DEFINE_WINDOW_IO(16)
692*4882a593Smuzhiyun DEFINE_WINDOW_IO(32)
693*4882a593Smuzhiyun 
694*4882a593Smuzhiyun #ifdef CONFIG_PCI
695*4882a593Smuzhiyun #define DEVICE_PCI(dev) ((dev_is_pci(dev)) ? to_pci_dev((dev)) : NULL)
696*4882a593Smuzhiyun #else
697*4882a593Smuzhiyun #define DEVICE_PCI(dev) NULL
698*4882a593Smuzhiyun #endif
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun #define VORTEX_PCI(vp)							\
701*4882a593Smuzhiyun 	((struct pci_dev *) (((vp)->gendev) ? DEVICE_PCI((vp)->gendev) : NULL))
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun #ifdef CONFIG_EISA
704*4882a593Smuzhiyun #define DEVICE_EISA(dev) (((dev)->bus == &eisa_bus_type) ? to_eisa_device((dev)) : NULL)
705*4882a593Smuzhiyun #else
706*4882a593Smuzhiyun #define DEVICE_EISA(dev) NULL
707*4882a593Smuzhiyun #endif
708*4882a593Smuzhiyun 
709*4882a593Smuzhiyun #define VORTEX_EISA(vp)							\
710*4882a593Smuzhiyun 	((struct eisa_device *) (((vp)->gendev) ? DEVICE_EISA((vp)->gendev) : NULL))
711*4882a593Smuzhiyun 
712*4882a593Smuzhiyun /* The action to take with a media selection timer tick.
713*4882a593Smuzhiyun    Note that we deviate from the 3Com order by checking 10base2 before AUI.
714*4882a593Smuzhiyun  */
715*4882a593Smuzhiyun enum xcvr_types {
716*4882a593Smuzhiyun 	XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
717*4882a593Smuzhiyun 	XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
718*4882a593Smuzhiyun };
719*4882a593Smuzhiyun 
720*4882a593Smuzhiyun static const struct media_table {
721*4882a593Smuzhiyun 	char *name;
722*4882a593Smuzhiyun 	unsigned int media_bits:16,		/* Bits to set in Wn4_Media register. */
723*4882a593Smuzhiyun 		mask:8,						/* The transceiver-present bit in Wn3_Config.*/
724*4882a593Smuzhiyun 		next:8;						/* The media type to try next. */
725*4882a593Smuzhiyun 	int wait;						/* Time before we check media status. */
726*4882a593Smuzhiyun } media_tbl[] = {
727*4882a593Smuzhiyun   {	"10baseT",   Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
728*4882a593Smuzhiyun   { "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
729*4882a593Smuzhiyun   { "undefined", 0,			0x80, XCVR_10baseT, 10000},
730*4882a593Smuzhiyun   { "10base2",   0,			0x10, XCVR_AUI,		(1*HZ)/10},
731*4882a593Smuzhiyun   { "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
732*4882a593Smuzhiyun   { "100baseFX", Media_Lnk, 0x04, XCVR_MII,		(14*HZ)/10},
733*4882a593Smuzhiyun   { "MII",		 0,			0x41, XCVR_10baseT, 3*HZ },
734*4882a593Smuzhiyun   { "undefined", 0,			0x01, XCVR_10baseT, 10000},
735*4882a593Smuzhiyun   { "Autonegotiate", 0,		0x41, XCVR_10baseT, 3*HZ},
736*4882a593Smuzhiyun   { "MII-External",	 0,		0x41, XCVR_10baseT, 3*HZ },
737*4882a593Smuzhiyun   { "Default",	 0,			0xFF, XCVR_10baseT, 10000},
738*4882a593Smuzhiyun };
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun static struct {
741*4882a593Smuzhiyun 	const char str[ETH_GSTRING_LEN];
742*4882a593Smuzhiyun } ethtool_stats_keys[] = {
743*4882a593Smuzhiyun 	{ "tx_deferred" },
744*4882a593Smuzhiyun 	{ "tx_max_collisions" },
745*4882a593Smuzhiyun 	{ "tx_multiple_collisions" },
746*4882a593Smuzhiyun 	{ "tx_single_collisions" },
747*4882a593Smuzhiyun 	{ "rx_bad_ssd" },
748*4882a593Smuzhiyun };
749*4882a593Smuzhiyun 
750*4882a593Smuzhiyun /* number of ETHTOOL_GSTATS u64's */
751*4882a593Smuzhiyun #define VORTEX_NUM_STATS    5
752*4882a593Smuzhiyun 
753*4882a593Smuzhiyun static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
754*4882a593Smuzhiyun 				   int chip_idx, int card_idx);
755*4882a593Smuzhiyun static int vortex_up(struct net_device *dev);
756*4882a593Smuzhiyun static void vortex_down(struct net_device *dev, int final);
757*4882a593Smuzhiyun static int vortex_open(struct net_device *dev);
758*4882a593Smuzhiyun static void mdio_sync(struct vortex_private *vp, int bits);
759*4882a593Smuzhiyun static int mdio_read(struct net_device *dev, int phy_id, int location);
760*4882a593Smuzhiyun static void mdio_write(struct net_device *vp, int phy_id, int location, int value);
761*4882a593Smuzhiyun static void vortex_timer(struct timer_list *t);
762*4882a593Smuzhiyun static netdev_tx_t vortex_start_xmit(struct sk_buff *skb,
763*4882a593Smuzhiyun 				     struct net_device *dev);
764*4882a593Smuzhiyun static netdev_tx_t boomerang_start_xmit(struct sk_buff *skb,
765*4882a593Smuzhiyun 					struct net_device *dev);
766*4882a593Smuzhiyun static int vortex_rx(struct net_device *dev);
767*4882a593Smuzhiyun static int boomerang_rx(struct net_device *dev);
768*4882a593Smuzhiyun static irqreturn_t vortex_boomerang_interrupt(int irq, void *dev_id);
769*4882a593Smuzhiyun static irqreturn_t _vortex_interrupt(int irq, struct net_device *dev);
770*4882a593Smuzhiyun static irqreturn_t _boomerang_interrupt(int irq, struct net_device *dev);
771*4882a593Smuzhiyun static int vortex_close(struct net_device *dev);
772*4882a593Smuzhiyun static void dump_tx_ring(struct net_device *dev);
773*4882a593Smuzhiyun static void update_stats(void __iomem *ioaddr, struct net_device *dev);
774*4882a593Smuzhiyun static struct net_device_stats *vortex_get_stats(struct net_device *dev);
775*4882a593Smuzhiyun static void set_rx_mode(struct net_device *dev);
776*4882a593Smuzhiyun #ifdef CONFIG_PCI
777*4882a593Smuzhiyun static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
778*4882a593Smuzhiyun #endif
779*4882a593Smuzhiyun static void vortex_tx_timeout(struct net_device *dev, unsigned int txqueue);
780*4882a593Smuzhiyun static void acpi_set_WOL(struct net_device *dev);
781*4882a593Smuzhiyun static const struct ethtool_ops vortex_ethtool_ops;
782*4882a593Smuzhiyun static void set_8021q_mode(struct net_device *dev, int enable);
783*4882a593Smuzhiyun 
784*4882a593Smuzhiyun /* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
785*4882a593Smuzhiyun /* Option count limit only -- unlimited interfaces are supported. */
786*4882a593Smuzhiyun #define MAX_UNITS 8
787*4882a593Smuzhiyun static int options[MAX_UNITS] = { [0 ... MAX_UNITS-1] = -1 };
788*4882a593Smuzhiyun static int full_duplex[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
789*4882a593Smuzhiyun static int hw_checksums[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
790*4882a593Smuzhiyun static int flow_ctrl[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
791*4882a593Smuzhiyun static int enable_wol[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
792*4882a593Smuzhiyun static int use_mmio[MAX_UNITS] = {[0 ... MAX_UNITS-1] = -1 };
793*4882a593Smuzhiyun static int global_options = -1;
794*4882a593Smuzhiyun static int global_full_duplex = -1;
795*4882a593Smuzhiyun static int global_enable_wol = -1;
796*4882a593Smuzhiyun static int global_use_mmio = -1;
797*4882a593Smuzhiyun 
798*4882a593Smuzhiyun /* Variables to work-around the Compaq PCI BIOS32 problem. */
799*4882a593Smuzhiyun static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
800*4882a593Smuzhiyun static struct net_device *compaq_net_device;
801*4882a593Smuzhiyun 
802*4882a593Smuzhiyun static int vortex_cards_found;
803*4882a593Smuzhiyun 
804*4882a593Smuzhiyun module_param(debug, int, 0);
805*4882a593Smuzhiyun module_param(global_options, int, 0);
806*4882a593Smuzhiyun module_param_array(options, int, NULL, 0);
807*4882a593Smuzhiyun module_param(global_full_duplex, int, 0);
808*4882a593Smuzhiyun module_param_array(full_duplex, int, NULL, 0);
809*4882a593Smuzhiyun module_param_array(hw_checksums, int, NULL, 0);
810*4882a593Smuzhiyun module_param_array(flow_ctrl, int, NULL, 0);
811*4882a593Smuzhiyun module_param(global_enable_wol, int, 0);
812*4882a593Smuzhiyun module_param_array(enable_wol, int, NULL, 0);
813*4882a593Smuzhiyun module_param(rx_copybreak, int, 0);
814*4882a593Smuzhiyun module_param(max_interrupt_work, int, 0);
815*4882a593Smuzhiyun module_param_hw(compaq_ioaddr, int, ioport, 0);
816*4882a593Smuzhiyun module_param_hw(compaq_irq, int, irq, 0);
817*4882a593Smuzhiyun module_param(compaq_device_id, int, 0);
818*4882a593Smuzhiyun module_param(watchdog, int, 0);
819*4882a593Smuzhiyun module_param(global_use_mmio, int, 0);
820*4882a593Smuzhiyun module_param_array(use_mmio, int, NULL, 0);
821*4882a593Smuzhiyun MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
822*4882a593Smuzhiyun MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
823*4882a593Smuzhiyun MODULE_PARM_DESC(global_options, "3c59x: same as options, but applies to all NICs if options is unset");
824*4882a593Smuzhiyun MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
825*4882a593Smuzhiyun MODULE_PARM_DESC(global_full_duplex, "3c59x: same as full_duplex, but applies to all NICs if full_duplex is unset");
826*4882a593Smuzhiyun MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
827*4882a593Smuzhiyun MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
828*4882a593Smuzhiyun MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
829*4882a593Smuzhiyun MODULE_PARM_DESC(global_enable_wol, "3c59x: same as enable_wol, but applies to all NICs if enable_wol is unset");
830*4882a593Smuzhiyun MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
831*4882a593Smuzhiyun MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
832*4882a593Smuzhiyun MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
833*4882a593Smuzhiyun MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
834*4882a593Smuzhiyun MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
835*4882a593Smuzhiyun MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
836*4882a593Smuzhiyun MODULE_PARM_DESC(global_use_mmio, "3c59x: same as use_mmio, but applies to all NICs if options is unset");
837*4882a593Smuzhiyun MODULE_PARM_DESC(use_mmio, "3c59x: use memory-mapped PCI I/O resource (0-1)");
838*4882a593Smuzhiyun 
839*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
poll_vortex(struct net_device * dev)840*4882a593Smuzhiyun static void poll_vortex(struct net_device *dev)
841*4882a593Smuzhiyun {
842*4882a593Smuzhiyun 	vortex_boomerang_interrupt(dev->irq, dev);
843*4882a593Smuzhiyun }
844*4882a593Smuzhiyun #endif
845*4882a593Smuzhiyun 
846*4882a593Smuzhiyun #ifdef CONFIG_PM
847*4882a593Smuzhiyun 
/* PM suspend/freeze/poweroff callback: if the interface is up, detach it
 * from the stack and shut the hardware down.  Always succeeds. */
static int vortex_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);

	if (ndev && netif_running(ndev)) {
		netif_device_detach(ndev);
		vortex_down(ndev, 1);
	}

	return 0;
}
860*4882a593Smuzhiyun 
/* PM resume/thaw/restore callback: if the interface was up, bring the
 * hardware back and reattach the device.  Returns vortex_up()'s error. */
static int vortex_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	int rc = 0;

	if (ndev && netif_running(ndev)) {
		rc = vortex_up(ndev);
		if (rc == 0)
			netif_device_attach(ndev);
	}

	return rc;
}
877*4882a593Smuzhiyun 
878*4882a593Smuzhiyun static const struct dev_pm_ops vortex_pm_ops = {
879*4882a593Smuzhiyun 	.suspend = vortex_suspend,
880*4882a593Smuzhiyun 	.resume = vortex_resume,
881*4882a593Smuzhiyun 	.freeze = vortex_suspend,
882*4882a593Smuzhiyun 	.thaw = vortex_resume,
883*4882a593Smuzhiyun 	.poweroff = vortex_suspend,
884*4882a593Smuzhiyun 	.restore = vortex_resume,
885*4882a593Smuzhiyun };
886*4882a593Smuzhiyun 
887*4882a593Smuzhiyun #define VORTEX_PM_OPS (&vortex_pm_ops)
888*4882a593Smuzhiyun 
889*4882a593Smuzhiyun #else /* !CONFIG_PM */
890*4882a593Smuzhiyun 
891*4882a593Smuzhiyun #define VORTEX_PM_OPS NULL
892*4882a593Smuzhiyun 
893*4882a593Smuzhiyun #endif /* !CONFIG_PM */
894*4882a593Smuzhiyun 
895*4882a593Smuzhiyun #ifdef CONFIG_EISA
896*4882a593Smuzhiyun static const struct eisa_device_id vortex_eisa_ids[] = {
897*4882a593Smuzhiyun 	{ "TCM5920", CH_3C592 },
898*4882a593Smuzhiyun 	{ "TCM5970", CH_3C597 },
899*4882a593Smuzhiyun 	{ "" }
900*4882a593Smuzhiyun };
901*4882a593Smuzhiyun MODULE_DEVICE_TABLE(eisa, vortex_eisa_ids);
902*4882a593Smuzhiyun 
vortex_eisa_probe(struct device * device)903*4882a593Smuzhiyun static int vortex_eisa_probe(struct device *device)
904*4882a593Smuzhiyun {
905*4882a593Smuzhiyun 	void __iomem *ioaddr;
906*4882a593Smuzhiyun 	struct eisa_device *edev;
907*4882a593Smuzhiyun 
908*4882a593Smuzhiyun 	edev = to_eisa_device(device);
909*4882a593Smuzhiyun 
910*4882a593Smuzhiyun 	if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME))
911*4882a593Smuzhiyun 		return -EBUSY;
912*4882a593Smuzhiyun 
913*4882a593Smuzhiyun 	ioaddr = ioport_map(edev->base_addr, VORTEX_TOTAL_SIZE);
914*4882a593Smuzhiyun 
915*4882a593Smuzhiyun 	if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12,
916*4882a593Smuzhiyun 					  edev->id.driver_data, vortex_cards_found)) {
917*4882a593Smuzhiyun 		release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
918*4882a593Smuzhiyun 		return -ENODEV;
919*4882a593Smuzhiyun 	}
920*4882a593Smuzhiyun 
921*4882a593Smuzhiyun 	vortex_cards_found++;
922*4882a593Smuzhiyun 
923*4882a593Smuzhiyun 	return 0;
924*4882a593Smuzhiyun }
925*4882a593Smuzhiyun 
vortex_eisa_remove(struct device * device)926*4882a593Smuzhiyun static int vortex_eisa_remove(struct device *device)
927*4882a593Smuzhiyun {
928*4882a593Smuzhiyun 	struct eisa_device *edev;
929*4882a593Smuzhiyun 	struct net_device *dev;
930*4882a593Smuzhiyun 	struct vortex_private *vp;
931*4882a593Smuzhiyun 	void __iomem *ioaddr;
932*4882a593Smuzhiyun 
933*4882a593Smuzhiyun 	edev = to_eisa_device(device);
934*4882a593Smuzhiyun 	dev = eisa_get_drvdata(edev);
935*4882a593Smuzhiyun 
936*4882a593Smuzhiyun 	if (!dev) {
937*4882a593Smuzhiyun 		pr_err("vortex_eisa_remove called for Compaq device!\n");
938*4882a593Smuzhiyun 		BUG();
939*4882a593Smuzhiyun 	}
940*4882a593Smuzhiyun 
941*4882a593Smuzhiyun 	vp = netdev_priv(dev);
942*4882a593Smuzhiyun 	ioaddr = vp->ioaddr;
943*4882a593Smuzhiyun 
944*4882a593Smuzhiyun 	unregister_netdev(dev);
945*4882a593Smuzhiyun 	iowrite16(TotalReset|0x14, ioaddr + EL3_CMD);
946*4882a593Smuzhiyun 	release_region(edev->base_addr, VORTEX_TOTAL_SIZE);
947*4882a593Smuzhiyun 
948*4882a593Smuzhiyun 	free_netdev(dev);
949*4882a593Smuzhiyun 	return 0;
950*4882a593Smuzhiyun }
951*4882a593Smuzhiyun 
952*4882a593Smuzhiyun static struct eisa_driver vortex_eisa_driver = {
953*4882a593Smuzhiyun 	.id_table = vortex_eisa_ids,
954*4882a593Smuzhiyun 	.driver   = {
955*4882a593Smuzhiyun 		.name    = "3c59x",
956*4882a593Smuzhiyun 		.probe   = vortex_eisa_probe,
957*4882a593Smuzhiyun 		.remove  = vortex_eisa_remove
958*4882a593Smuzhiyun 	}
959*4882a593Smuzhiyun };
960*4882a593Smuzhiyun 
961*4882a593Smuzhiyun #endif /* CONFIG_EISA */
962*4882a593Smuzhiyun 
963*4882a593Smuzhiyun /* returns count found (>= 0), or negative on error */
vortex_eisa_init(void)964*4882a593Smuzhiyun static int __init vortex_eisa_init(void)
965*4882a593Smuzhiyun {
966*4882a593Smuzhiyun 	int eisa_found = 0;
967*4882a593Smuzhiyun 	int orig_cards_found = vortex_cards_found;
968*4882a593Smuzhiyun 
969*4882a593Smuzhiyun #ifdef CONFIG_EISA
970*4882a593Smuzhiyun 	int err;
971*4882a593Smuzhiyun 
972*4882a593Smuzhiyun 	err = eisa_driver_register (&vortex_eisa_driver);
973*4882a593Smuzhiyun 	if (!err) {
974*4882a593Smuzhiyun 		/*
975*4882a593Smuzhiyun 		 * Because of the way EISA bus is probed, we cannot assume
976*4882a593Smuzhiyun 		 * any device have been found when we exit from
977*4882a593Smuzhiyun 		 * eisa_driver_register (the bus root driver may not be
978*4882a593Smuzhiyun 		 * initialized yet). So we blindly assume something was
979*4882a593Smuzhiyun 		 * found, and let the sysfs magic happened...
980*4882a593Smuzhiyun 		 */
981*4882a593Smuzhiyun 		eisa_found = 1;
982*4882a593Smuzhiyun 	}
983*4882a593Smuzhiyun #endif
984*4882a593Smuzhiyun 
985*4882a593Smuzhiyun 	/* Special code to work-around the Compaq PCI BIOS32 problem. */
986*4882a593Smuzhiyun 	if (compaq_ioaddr) {
987*4882a593Smuzhiyun 		vortex_probe1(NULL, ioport_map(compaq_ioaddr, VORTEX_TOTAL_SIZE),
988*4882a593Smuzhiyun 			      compaq_irq, compaq_device_id, vortex_cards_found++);
989*4882a593Smuzhiyun 	}
990*4882a593Smuzhiyun 
991*4882a593Smuzhiyun 	return vortex_cards_found - orig_cards_found + eisa_found;
992*4882a593Smuzhiyun }
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun /* returns count (>= 0), or negative on error */
vortex_init_one(struct pci_dev * pdev,const struct pci_device_id * ent)995*4882a593Smuzhiyun static int vortex_init_one(struct pci_dev *pdev,
996*4882a593Smuzhiyun 			   const struct pci_device_id *ent)
997*4882a593Smuzhiyun {
998*4882a593Smuzhiyun 	int rc, unit, pci_bar;
999*4882a593Smuzhiyun 	struct vortex_chip_info *vci;
1000*4882a593Smuzhiyun 	void __iomem *ioaddr;
1001*4882a593Smuzhiyun 
1002*4882a593Smuzhiyun 	/* wake up and enable device */
1003*4882a593Smuzhiyun 	rc = pci_enable_device(pdev);
1004*4882a593Smuzhiyun 	if (rc < 0)
1005*4882a593Smuzhiyun 		goto out;
1006*4882a593Smuzhiyun 
1007*4882a593Smuzhiyun 	rc = pci_request_regions(pdev, DRV_NAME);
1008*4882a593Smuzhiyun 	if (rc < 0)
1009*4882a593Smuzhiyun 		goto out_disable;
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun 	unit = vortex_cards_found;
1012*4882a593Smuzhiyun 
1013*4882a593Smuzhiyun 	if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) {
1014*4882a593Smuzhiyun 		/* Determine the default if the user didn't override us */
1015*4882a593Smuzhiyun 		vci = &vortex_info_tbl[ent->driver_data];
1016*4882a593Smuzhiyun 		pci_bar = vci->drv_flags & (IS_CYCLONE | IS_TORNADO) ? 1 : 0;
1017*4882a593Smuzhiyun 	} else if (unit < MAX_UNITS && use_mmio[unit] >= 0)
1018*4882a593Smuzhiyun 		pci_bar = use_mmio[unit] ? 1 : 0;
1019*4882a593Smuzhiyun 	else
1020*4882a593Smuzhiyun 		pci_bar = global_use_mmio ? 1 : 0;
1021*4882a593Smuzhiyun 
1022*4882a593Smuzhiyun 	ioaddr = pci_iomap(pdev, pci_bar, 0);
1023*4882a593Smuzhiyun 	if (!ioaddr) /* If mapping fails, fall-back to BAR 0... */
1024*4882a593Smuzhiyun 		ioaddr = pci_iomap(pdev, 0, 0);
1025*4882a593Smuzhiyun 	if (!ioaddr) {
1026*4882a593Smuzhiyun 		rc = -ENOMEM;
1027*4882a593Smuzhiyun 		goto out_release;
1028*4882a593Smuzhiyun 	}
1029*4882a593Smuzhiyun 
1030*4882a593Smuzhiyun 	rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq,
1031*4882a593Smuzhiyun 			   ent->driver_data, unit);
1032*4882a593Smuzhiyun 	if (rc < 0)
1033*4882a593Smuzhiyun 		goto out_iounmap;
1034*4882a593Smuzhiyun 
1035*4882a593Smuzhiyun 	vortex_cards_found++;
1036*4882a593Smuzhiyun 	goto out;
1037*4882a593Smuzhiyun 
1038*4882a593Smuzhiyun out_iounmap:
1039*4882a593Smuzhiyun 	pci_iounmap(pdev, ioaddr);
1040*4882a593Smuzhiyun out_release:
1041*4882a593Smuzhiyun 	pci_release_regions(pdev);
1042*4882a593Smuzhiyun out_disable:
1043*4882a593Smuzhiyun 	pci_disable_device(pdev);
1044*4882a593Smuzhiyun out:
1045*4882a593Smuzhiyun 	return rc;
1046*4882a593Smuzhiyun }
1047*4882a593Smuzhiyun 
1048*4882a593Smuzhiyun static const struct net_device_ops boomrang_netdev_ops = {
1049*4882a593Smuzhiyun 	.ndo_open		= vortex_open,
1050*4882a593Smuzhiyun 	.ndo_stop		= vortex_close,
1051*4882a593Smuzhiyun 	.ndo_start_xmit		= boomerang_start_xmit,
1052*4882a593Smuzhiyun 	.ndo_tx_timeout		= vortex_tx_timeout,
1053*4882a593Smuzhiyun 	.ndo_get_stats		= vortex_get_stats,
1054*4882a593Smuzhiyun #ifdef CONFIG_PCI
1055*4882a593Smuzhiyun 	.ndo_do_ioctl 		= vortex_ioctl,
1056*4882a593Smuzhiyun #endif
1057*4882a593Smuzhiyun 	.ndo_set_rx_mode	= set_rx_mode,
1058*4882a593Smuzhiyun 	.ndo_set_mac_address 	= eth_mac_addr,
1059*4882a593Smuzhiyun 	.ndo_validate_addr	= eth_validate_addr,
1060*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
1061*4882a593Smuzhiyun 	.ndo_poll_controller	= poll_vortex,
1062*4882a593Smuzhiyun #endif
1063*4882a593Smuzhiyun };
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun static const struct net_device_ops vortex_netdev_ops = {
1066*4882a593Smuzhiyun 	.ndo_open		= vortex_open,
1067*4882a593Smuzhiyun 	.ndo_stop		= vortex_close,
1068*4882a593Smuzhiyun 	.ndo_start_xmit		= vortex_start_xmit,
1069*4882a593Smuzhiyun 	.ndo_tx_timeout		= vortex_tx_timeout,
1070*4882a593Smuzhiyun 	.ndo_get_stats		= vortex_get_stats,
1071*4882a593Smuzhiyun #ifdef CONFIG_PCI
1072*4882a593Smuzhiyun 	.ndo_do_ioctl 		= vortex_ioctl,
1073*4882a593Smuzhiyun #endif
1074*4882a593Smuzhiyun 	.ndo_set_rx_mode	= set_rx_mode,
1075*4882a593Smuzhiyun 	.ndo_set_mac_address 	= eth_mac_addr,
1076*4882a593Smuzhiyun 	.ndo_validate_addr	= eth_validate_addr,
1077*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
1078*4882a593Smuzhiyun 	.ndo_poll_controller	= poll_vortex,
1079*4882a593Smuzhiyun #endif
1080*4882a593Smuzhiyun };
1081*4882a593Smuzhiyun 
1082*4882a593Smuzhiyun /*
1083*4882a593Smuzhiyun  * Start up the PCI/EISA device which is described by *gendev.
1084*4882a593Smuzhiyun  * Return 0 on success.
1085*4882a593Smuzhiyun  *
1086*4882a593Smuzhiyun  * NOTE: pdev can be NULL, for the case of a Compaq device
1087*4882a593Smuzhiyun  */
/*
 * vortex_probe1 - common probe path for PCI, EISA and Compaq built-in boards.
 * @gendev:   generic device (PCI or EISA); NULL for the Compaq built-in NIC
 * @ioaddr:   mapped register base
 * @irq:      interrupt line for the board
 * @chip_idx: index into vortex_info_tbl[]
 * @card_idx: unit number, indexes the per-card module parameter arrays
 *
 * Allocates and initializes the net_device, reads and checksums the EEPROM
 * (station address, capabilities), allocates the shared Rx/Tx descriptor
 * rings, probes for MII transceivers and registers the interface.
 *
 * Returns 0 on success or a negative errno; on failure everything acquired
 * here is released again.
 */
static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq,
			 int chip_idx, int card_idx)
{
	struct vortex_private *vp;
	int option;
	unsigned int eeprom[0x40], checksum = 0;		/* EEPROM contents */
	int i, step;
	struct net_device *dev;
	static int printed_version;
	int retval, print_info;
	struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
	const char *print_name = "3c59x";
	struct pci_dev *pdev = NULL;
	struct eisa_device *edev = NULL;

	if (!printed_version) {
		pr_info("%s", version);
		printed_version = 1;
	}

	/* Pick the bus-specific name for log messages, when we have a bus. */
	if (gendev) {
		if ((pdev = DEVICE_PCI(gendev))) {
			print_name = pci_name(pdev);
		}

		if ((edev = DEVICE_EISA(gendev))) {
			print_name = dev_name(&edev->dev);
		}
	}

	dev = alloc_etherdev(sizeof(*vp));
	retval = -ENOMEM;
	if (!dev)
		goto out;

	SET_NETDEV_DEV(dev, gendev);
	vp = netdev_priv(dev);

	option = global_options;

	/* The lower four bits are the media type. */
	if (dev->mem_start) {
		/*
		 * The 'options' param is passed in as the third arg to the
		 * LILO 'ether=' argument for non-modular use
		 */
		option = dev->mem_start;
	}
	else if (card_idx < MAX_UNITS) {
		if (options[card_idx] >= 0)
			option = options[card_idx];
	}

	if (option > 0) {
		if (option & 0x8000)
			vortex_debug = 7;
		if (option & 0x4000)
			vortex_debug = 2;
		if (option & 0x0400)
			vp->enable_wol = 1;
	}

	print_info = (vortex_debug > 1);
	if (print_info)
		pr_info("See Documentation/networking/device_drivers/ethernet/3com/vortex.rst\n");

	pr_info("%s: 3Com %s %s at %p.\n",
	       print_name,
	       pdev ? "PCI" : "EISA",
	       vci->name,
	       ioaddr);

	dev->base_addr = (unsigned long)ioaddr;
	dev->irq = irq;
	dev->mtu = mtu;
	vp->ioaddr = ioaddr;
	vp->large_frames = mtu > 1500;
	vp->drv_flags = vci->drv_flags;
	vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
	vp->io_size = vci->io_size;
	vp->card_idx = card_idx;
	vp->window = -1;

	/* module list only for Compaq device */
	if (gendev == NULL) {
		compaq_net_device = dev;
	}

	/* PCI-only startup logic */
	if (pdev) {
		/* enable bus-mastering if necessary */
		if (vci->flags & PCI_USES_MASTER)
			pci_set_master(pdev);

		if (vci->drv_flags & IS_VORTEX) {
			u8 pci_latency;
			u8 new_latency = 248;

			/* Check the PCI latency value.  On the 3c590 series the latency timer
			   must be set to the maximum value to avoid data corruption that occurs
			   when the timer expires during a transfer.  This bug exists the Vortex
			   chip only. */
			pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
			if (pci_latency < new_latency) {
				pr_info("%s: Overriding PCI latency timer (CFLT) setting of %d, new value is %d.\n",
					print_name, pci_latency, new_latency);
				pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
			}
		}
	}

	spin_lock_init(&vp->lock);
	spin_lock_init(&vp->mii_lock);
	spin_lock_init(&vp->window_lock);
	vp->gendev = gendev;
	vp->mii.dev = dev;
	vp->mii.mdio_read = mdio_read;
	vp->mii.mdio_write = mdio_write;
	vp->mii.phy_id_mask = 0x1f;
	vp->mii.reg_num_mask = 0x1f;

	/* Makes sure rings are at least 16 byte aligned. */
	vp->rx_ring = dma_alloc_coherent(gendev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
					   + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
					   &vp->rx_ring_dma, GFP_KERNEL);
	retval = -ENOMEM;
	if (!vp->rx_ring)
		goto free_device;

	/* Tx ring lives in the same coherent allocation, right after Rx. */
	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
	vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;

	/* if we are a PCI driver, we store info in pdev->driver_data
	 * instead of a module list */
	if (pdev)
		pci_set_drvdata(pdev, dev);
	if (edev)
		eisa_set_drvdata(edev, dev);

	vp->media_override = 7;
	if (option >= 0) {
		vp->media_override = ((option & 7) == 2)  ?  0  :  option & 15;
		if (vp->media_override != 7)
			vp->medialock = 1;
		vp->full_duplex = (option & 0x200) ? 1 : 0;
		vp->bus_master = (option & 16) ? 1 : 0;
	}

	if (global_full_duplex > 0)
		vp->full_duplex = 1;
	if (global_enable_wol > 0)
		vp->enable_wol = 1;

	if (card_idx < MAX_UNITS) {
		if (full_duplex[card_idx] > 0)
			vp->full_duplex = 1;
		if (flow_ctrl[card_idx] > 0)
			vp->flow_ctrl = 1;
		if (enable_wol[card_idx] > 0)
			vp->enable_wol = 1;
	}

	vp->mii.force_media = vp->full_duplex;
	vp->options = option;
	/* Read the station address from the EEPROM. */
	{
		int base;

		if (vci->drv_flags & EEPROM_8BIT)
			base = 0x230;
		else if (vci->drv_flags & EEPROM_OFFSET)
			base = EEPROM_Read + 0x30;
		else
			base = EEPROM_Read;

		for (i = 0; i < 0x40; i++) {
			int timer;
			window_write16(vp, base + i, 0, Wn0EepromCmd);
			/* Pause for at least 162 us. for the read to take place. */
			for (timer = 10; timer >= 0; timer--) {
				udelay(162);
				if ((window_read16(vp, 0, Wn0EepromCmd) &
				     0x8000) == 0)
					break;
			}
			eeprom[i] = window_read16(vp, 0, Wn0EepromData);
		}
	}
	for (i = 0; i < 0x18; i++)
		checksum ^= eeprom[i];
	checksum = (checksum ^ (checksum >> 8)) & 0xff;
	if (checksum != 0x00) {		/* Grrr, needless incompatible change 3Com. */
		while (i < 0x21)
			checksum ^= eeprom[i++];
		checksum = (checksum ^ (checksum >> 8)) & 0xff;
	}
	if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
		pr_cont(" ***INVALID CHECKSUM %4.4x*** ", checksum);
	for (i = 0; i < 3; i++)
		((__be16 *)dev->dev_addr)[i] = htons(eeprom[i + 10]);
	if (print_info)
		pr_cont(" %pM", dev->dev_addr);
	/* Unfortunately an all zero eeprom passes the checksum and this
	   gets found in the wild in failure cases. Crypto is hard 8) */
	if (!is_valid_ether_addr(dev->dev_addr)) {
		retval = -EINVAL;
		pr_err("*** EEPROM MAC address is invalid.\n");
		goto free_ring;	/* With every pack */
	}
	/* Program the station address into window 2. */
	for (i = 0; i < 6; i++)
		window_write8(vp, dev->dev_addr[i], 2, i);

	if (print_info)
		pr_cont(", IRQ %d\n", dev->irq);
	/* Tell them about an invalid IRQ. */
	if (dev->irq <= 0 || dev->irq >= nr_irqs)
		pr_warn(" *** Warning: IRQ %d is unlikely to work! ***\n",
			dev->irq);

	step = (window_read8(vp, 4, Wn4_NetDiag) & 0x1e) >> 1;
	if (print_info) {
		pr_info("  product code %02x%02x rev %02x.%d date %02d-%02d-%02d\n",
			eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
			step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
	}


	if (pdev && vci->drv_flags & HAS_CB_FNS) {
		unsigned short n;

		/* Map the CardBus function-event register window (BAR 2). */
		vp->cb_fn_base = pci_iomap(pdev, 2, 0);
		if (!vp->cb_fn_base) {
			retval = -ENOMEM;
			goto free_ring;
		}

		if (print_info) {
			pr_info("%s: CardBus functions mapped %16.16llx->%p\n",
				print_name,
				(unsigned long long)pci_resource_start(pdev, 2),
				vp->cb_fn_base);
		}

		n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
		if (vp->drv_flags & INVERT_LED_PWR)
			n |= 0x10;
		if (vp->drv_flags & INVERT_MII_PWR)
			n |= 0x4000;
		window_write16(vp, n, 2, Wn2_ResetOptions);
		if (vp->drv_flags & WNO_XCVR_PWR) {
			window_write16(vp, 0x0800, 0, 0);
		}
	}

	/* Extract our information from the EEPROM data. */
	vp->info1 = eeprom[13];
	vp->info2 = eeprom[15];
	vp->capabilities = eeprom[16];

	if (vp->info1 & 0x8000) {
		vp->full_duplex = 1;
		if (print_info)
			pr_info("Full duplex capable\n");
	}

	{
		static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
		unsigned int config;
		vp->available_media = window_read16(vp, 3, Wn3_Options);
		if ((vp->available_media & 0xff) == 0)		/* Broken 3c916 */
			vp->available_media = 0x40;
		config = window_read32(vp, 3, Wn3_Config);
		if (print_info) {
			pr_debug("  Internal config register is %4.4x, transceivers %#x.\n",
				config, window_read16(vp, 3, Wn3_Options));
			pr_info("  %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
				   8 << RAM_SIZE(config),
				   RAM_WIDTH(config) ? "word" : "byte",
				   ram_split[RAM_SPLIT(config)],
				   AUTOSELECT(config) ? "autoselect/" : "",
				   XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
				   media_tbl[XCVR(config)].name);
		}
		vp->default_media = XCVR(config);
		if (vp->default_media == XCVR_NWAY)
			vp->has_nway = 1;
		vp->autoselect = AUTOSELECT(config);
	}

	if (vp->media_override != 7) {
		pr_info("%s:  Media override to transceiver type %d (%s).\n",
				print_name, vp->media_override,
				media_tbl[vp->media_override].name);
		dev->if_port = vp->media_override;
	} else
		dev->if_port = vp->default_media;

	if ((vp->available_media & 0x40) || (vci->drv_flags & HAS_NWAY) ||
		dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
		int phy, phy_idx = 0;
		mii_preamble_required++;
		if (vp->drv_flags & EXTRA_PREAMBLE)
			mii_preamble_required++;
		mdio_sync(vp, 32);
		mdio_read(dev, 24, MII_BMSR);
		for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
			int mii_status, phyx;

			/*
			 * For the 3c905CX we look at index 24 first, because it bogusly
			 * reports an external PHY at all indices
			 */
			if (phy == 0)
				phyx = 24;
			else if (phy <= 24)
				phyx = phy - 1;
			else
				phyx = phy;
			mii_status = mdio_read(dev, phyx, MII_BMSR);
			if (mii_status  &&  mii_status != 0xffff) {
				vp->phys[phy_idx++] = phyx;
				if (print_info) {
					pr_info("  MII transceiver found at address %d, status %4x.\n",
						phyx, mii_status);
				}
				if ((mii_status & 0x0040) == 0)
					mii_preamble_required++;
			}
		}
		mii_preamble_required--;
		if (phy_idx == 0) {
			pr_warn("  ***WARNING*** No MII transceivers found!\n");
			vp->phys[0] = 24;
		} else {
			vp->advertising = mdio_read(dev, vp->phys[0], MII_ADVERTISE);
			if (vp->full_duplex) {
				/* Only advertise the FD media types. */
				vp->advertising &= ~0x02A0;
				mdio_write(dev, vp->phys[0], 4, vp->advertising);
			}
		}
		vp->mii.phy_id = vp->phys[0];
	}

	if (vp->capabilities & CapBusMaster) {
		vp->full_bus_master_tx = 1;
		if (print_info) {
			pr_info("  Enabling bus-master transmits and %s receives.\n",
			(vp->info2 & 1) ? "early" : "whole-frame" );
		}
		vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
		vp->bus_master = 0;		/* AKPM: vortex only */
	}

	/* The 3c59x-specific entries in the device structure. */
	if (vp->full_bus_master_tx) {
		dev->netdev_ops = &boomrang_netdev_ops;
		/* Actually, it still should work with iommu. */
		if (card_idx < MAX_UNITS &&
		    ((hw_checksums[card_idx] == -1 && (vp->drv_flags & HAS_HWCKSM)) ||
				hw_checksums[card_idx] == 1)) {
			dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
		}
	} else
		dev->netdev_ops =  &vortex_netdev_ops;

	if (print_info) {
		pr_info("%s: scatter/gather %sabled. h/w checksums %sabled\n",
				print_name,
				(dev->features & NETIF_F_SG) ? "en":"dis",
				(dev->features & NETIF_F_IP_CSUM) ? "en":"dis");
	}

	dev->ethtool_ops = &vortex_ethtool_ops;
	dev->watchdog_timeo = (watchdog * HZ) / 1000;

	if (pdev) {
		vp->pm_state_valid = 1;
		pci_save_state(pdev);
		acpi_set_WOL(dev);
	}
	retval = register_netdev(dev);
	if (retval == 0)
		return 0;

free_ring:
	/*
	 * Free with the same device the ring was allocated with.  pdev is
	 * NULL for EISA and Compaq built-in boards, so the previous
	 * dma_free_coherent(&pdev->dev, ...) would dereference a NULL
	 * pointer on this error path; gendev matches the dma_alloc_coherent()
	 * call above (and is NULL-safe for the Compaq case).
	 */
	dma_free_coherent(gendev,
		sizeof(struct boom_rx_desc) * RX_RING_SIZE +
		sizeof(struct boom_tx_desc) * TX_RING_SIZE,
		vp->rx_ring, vp->rx_ring_dma);
free_device:
	free_netdev(dev);
	pr_err(PFX "vortex_probe1 fails.  Returns %d\n", retval);
out:
	return retval;
}
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun static void
issue_and_wait(struct net_device * dev,int cmd)1486*4882a593Smuzhiyun issue_and_wait(struct net_device *dev, int cmd)
1487*4882a593Smuzhiyun {
1488*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
1489*4882a593Smuzhiyun 	void __iomem *ioaddr = vp->ioaddr;
1490*4882a593Smuzhiyun 	int i;
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 	iowrite16(cmd, ioaddr + EL3_CMD);
1493*4882a593Smuzhiyun 	for (i = 0; i < 2000; i++) {
1494*4882a593Smuzhiyun 		if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
1495*4882a593Smuzhiyun 			return;
1496*4882a593Smuzhiyun 	}
1497*4882a593Smuzhiyun 
1498*4882a593Smuzhiyun 	/* OK, that didn't work.  Do it the slow way.  One second */
1499*4882a593Smuzhiyun 	for (i = 0; i < 100000; i++) {
1500*4882a593Smuzhiyun 		if (!(ioread16(ioaddr + EL3_STATUS) & CmdInProgress)) {
1501*4882a593Smuzhiyun 			if (vortex_debug > 1)
1502*4882a593Smuzhiyun 				pr_info("%s: command 0x%04x took %d usecs\n",
1503*4882a593Smuzhiyun 					   dev->name, cmd, i * 10);
1504*4882a593Smuzhiyun 			return;
1505*4882a593Smuzhiyun 		}
1506*4882a593Smuzhiyun 		udelay(10);
1507*4882a593Smuzhiyun 	}
1508*4882a593Smuzhiyun 	pr_err("%s: command 0x%04x did not complete! Status=0x%x\n",
1509*4882a593Smuzhiyun 			   dev->name, cmd, ioread16(ioaddr + EL3_STATUS));
1510*4882a593Smuzhiyun }
1511*4882a593Smuzhiyun 
1512*4882a593Smuzhiyun static void
vortex_set_duplex(struct net_device * dev)1513*4882a593Smuzhiyun vortex_set_duplex(struct net_device *dev)
1514*4882a593Smuzhiyun {
1515*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
1516*4882a593Smuzhiyun 
1517*4882a593Smuzhiyun 	pr_info("%s:  setting %s-duplex.\n",
1518*4882a593Smuzhiyun 		dev->name, (vp->full_duplex) ? "full" : "half");
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun 	/* Set the full-duplex bit. */
1521*4882a593Smuzhiyun 	window_write16(vp,
1522*4882a593Smuzhiyun 		       ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
1523*4882a593Smuzhiyun 		       (vp->large_frames ? 0x40 : 0) |
1524*4882a593Smuzhiyun 		       ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ?
1525*4882a593Smuzhiyun 			0x100 : 0),
1526*4882a593Smuzhiyun 		       3, Wn3_MAC_Ctrl);
1527*4882a593Smuzhiyun }
1528*4882a593Smuzhiyun 
vortex_check_media(struct net_device * dev,unsigned int init)1529*4882a593Smuzhiyun static void vortex_check_media(struct net_device *dev, unsigned int init)
1530*4882a593Smuzhiyun {
1531*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
1532*4882a593Smuzhiyun 	unsigned int ok_to_print = 0;
1533*4882a593Smuzhiyun 
1534*4882a593Smuzhiyun 	if (vortex_debug > 3)
1535*4882a593Smuzhiyun 		ok_to_print = 1;
1536*4882a593Smuzhiyun 
1537*4882a593Smuzhiyun 	if (mii_check_media(&vp->mii, ok_to_print, init)) {
1538*4882a593Smuzhiyun 		vp->full_duplex = vp->mii.full_duplex;
1539*4882a593Smuzhiyun 		vortex_set_duplex(dev);
1540*4882a593Smuzhiyun 	} else if (init) {
1541*4882a593Smuzhiyun 		vortex_set_duplex(dev);
1542*4882a593Smuzhiyun 	}
1543*4882a593Smuzhiyun }
1544*4882a593Smuzhiyun 
/*
 * vortex_up - bring the interface to the running state.
 *
 * Re-enables the PCI device (if any), selects the active media port,
 * resets the Tx/Rx engines, programs the station address, clears the
 * statistics counters, primes the bus-master descriptor rings and finally
 * enables the receiver, transmitter and interrupt sources.
 *
 * Called from vortex_open() and from the resume/reset paths.  The register
 * write ordering below follows the 3Com programming sequence; do not
 * reorder casually.  Returns 0 on success or a negative errno from
 * pci_enable_device().
 */
static int
vortex_up(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	unsigned int config;
	int i, mii_reg5, err = 0;

	if (VORTEX_PCI(vp)) {
		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);	/* Go active */
		if (vp->pm_state_valid)
			pci_restore_state(VORTEX_PCI(vp));
		err = pci_enable_device(VORTEX_PCI(vp));
		if (err) {
			pr_warn("%s: Could not enable device\n", dev->name);
			goto err_out;
		}
	}

	/* Before initializing select the active media port. */
	config = window_read32(vp, 3, Wn3_Config);

	if (vp->media_override != 7) {
		/* User forced a specific transceiver via module options. */
		pr_info("%s: Media override to transceiver %d (%s).\n",
			   dev->name, vp->media_override,
			   media_tbl[vp->media_override].name);
		dev->if_port = vp->media_override;
	} else if (vp->autoselect) {
		if (vp->has_nway) {
			if (vortex_debug > 1)
				pr_info("%s: using NWAY device table, not %d\n",
								dev->name, dev->if_port);
			dev->if_port = XCVR_NWAY;
		} else {
			/* Find first available media type, starting with 100baseTx. */
			dev->if_port = XCVR_100baseTx;
			while (! (vp->available_media & media_tbl[dev->if_port].mask))
				dev->if_port = media_tbl[dev->if_port].next;
			if (vortex_debug > 1)
				pr_info("%s: first available media type: %s\n",
					dev->name, media_tbl[dev->if_port].name);
		}
	} else {
		dev->if_port = vp->default_media;
		if (vortex_debug > 1)
			pr_info("%s: using default media %s\n",
				dev->name, media_tbl[dev->if_port].name);
	}

	/* Media-watchdog timer; fires after the media-specific settle time. */
	timer_setup(&vp->timer, vortex_timer, 0);
	mod_timer(&vp->timer, RUN_AT(media_tbl[dev->if_port].wait));

	if (vortex_debug > 1)
		pr_debug("%s: Initial media type %s.\n",
			   dev->name, media_tbl[dev->if_port].name);

	vp->full_duplex = vp->mii.force_media;
	/* Insert the chosen port into bits 20..23 of InternalConfig. */
	config = BFINS(config, dev->if_port, 20, 4);
	if (vortex_debug > 6)
		pr_debug("vortex_up(): writing 0x%x to InternalConfig\n", config);
	window_write32(vp, config, 3, Wn3_Config);

	if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) {
		mdio_read(dev, vp->phys[0], MII_BMSR);
		/* Link-partner ability: bit 0x0400 is pause (flow control). */
		mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA);
		vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
		vp->mii.full_duplex = vp->full_duplex;

		vortex_check_media(dev, 1);
	}
	else
		vortex_set_duplex(dev);

	issue_and_wait(dev, TxReset);
	/*
	 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
	 */
	issue_and_wait(dev, RxReset|0x04);


	/* Mask all status bits while we reconfigure. */
	iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD);

	if (vortex_debug > 1) {
		pr_debug("%s: vortex_up() irq %d media status %4.4x.\n",
			   dev->name, dev->irq, window_read16(vp, 4, Wn4_Media));
	}

	/* Set the station address and mask in window 2 each time opened. */
	for (i = 0; i < 6; i++)
		window_write8(vp, dev->dev_addr[i], 2, i);
	for (; i < 12; i+=2)
		window_write16(vp, 0, 2, i);

	if (vp->cb_fn_base) {
		/* Re-apply the CardBus LED/MII power polarity quirks. */
		unsigned short n = window_read16(vp, 2, Wn2_ResetOptions) & ~0x4010;
		if (vp->drv_flags & INVERT_LED_PWR)
			n |= 0x10;
		if (vp->drv_flags & INVERT_MII_PWR)
			n |= 0x4000;
		window_write16(vp, n, 2, Wn2_ResetOptions);
	}

	if (dev->if_port == XCVR_10base2)
		/* Start the thinnet transceiver. We should really wait 50ms...*/
		iowrite16(StartCoax, ioaddr + EL3_CMD);
	if (dev->if_port != XCVR_NWAY) {
		window_write16(vp,
			       (window_read16(vp, 4, Wn4_Media) &
				~(Media_10TP|Media_SQE)) |
			       media_tbl[dev->if_port].media_bits,
			       4, Wn4_Media);
	}

	/* Switch to the stats window, and clear all stats by reading. */
	iowrite16(StatsDisable, ioaddr + EL3_CMD);
	for (i = 0; i < 10; i++)
		window_read8(vp, 6, i);
	window_read16(vp, 6, 10);
	window_read16(vp, 6, 12);
	/* New: On the Vortex we must also clear the BadSSD counter. */
	window_read8(vp, 4, 12);
	/* ..and on the Boomerang we enable the extra statistics bits. */
	window_write16(vp, 0x0040, 4, Wn4_NetDiag);

	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
		vp->cur_rx = 0;
		/* Initialize the RxEarly register as recommended. */
		iowrite16(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
		iowrite32(0x0020, ioaddr + PktStatus);
		iowrite32(vp->rx_ring_dma, ioaddr + UpListPtr);
	}
	if (vp->full_bus_master_tx) { 		/* Boomerang bus master Tx. */
		vp->cur_tx = vp->dirty_tx = 0;
		if (vp->drv_flags & IS_BOOMERANG)
			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
		/* Clear the Rx, Tx rings. */
		for (i = 0; i < RX_RING_SIZE; i++)	/* AKPM: this is done in vortex_open, too */
			vp->rx_ring[i].status = 0;
		for (i = 0; i < TX_RING_SIZE; i++)
			vp->tx_skbuff[i] = NULL;
		iowrite32(0, ioaddr + DownListPtr);
	}
	/* Set receiver mode: presumably accept b-case and phys addr only. */
	set_rx_mode(dev);
	/* enable 802.1q tagged frames */
	set_8021q_mode(dev, 1);
	iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */

	iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
	iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
	/* Allow status bits to be seen. */
	vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
		(vp->full_bus_master_tx ? DownComplete : TxAvailable) |
		(vp->full_bus_master_rx ? UpComplete : RxComplete) |
		(vp->bus_master ? DMADone : 0);
	vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
		(vp->full_bus_master_rx ? 0 : RxComplete) |
		StatsFull | HostError | TxComplete | IntReq
		| (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
	iowrite16(vp->status_enable, ioaddr + EL3_CMD);
	/* Ack all pending events, and set active indicator mask. */
	iowrite16(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
		 ioaddr + EL3_CMD);
	iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
	if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
		iowrite32(0x8000, vp->cb_fn_base + 4);
	netif_start_queue (dev);
	netdev_reset_queue(dev);
err_out:
	/* On the success path we fall through here with err == 0. */
	return err;
}
1716*4882a593Smuzhiyun 
1717*4882a593Smuzhiyun static int
vortex_open(struct net_device * dev)1718*4882a593Smuzhiyun vortex_open(struct net_device *dev)
1719*4882a593Smuzhiyun {
1720*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
1721*4882a593Smuzhiyun 	int i;
1722*4882a593Smuzhiyun 	int retval;
1723*4882a593Smuzhiyun 	dma_addr_t dma;
1724*4882a593Smuzhiyun 
1725*4882a593Smuzhiyun 	/* Use the now-standard shared IRQ implementation. */
1726*4882a593Smuzhiyun 	if ((retval = request_irq(dev->irq, vortex_boomerang_interrupt, IRQF_SHARED, dev->name, dev))) {
1727*4882a593Smuzhiyun 		pr_err("%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
1728*4882a593Smuzhiyun 		goto err;
1729*4882a593Smuzhiyun 	}
1730*4882a593Smuzhiyun 
1731*4882a593Smuzhiyun 	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
1732*4882a593Smuzhiyun 		if (vortex_debug > 2)
1733*4882a593Smuzhiyun 			pr_debug("%s:  Filling in the Rx ring.\n", dev->name);
1734*4882a593Smuzhiyun 		for (i = 0; i < RX_RING_SIZE; i++) {
1735*4882a593Smuzhiyun 			struct sk_buff *skb;
1736*4882a593Smuzhiyun 			vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
1737*4882a593Smuzhiyun 			vp->rx_ring[i].status = 0;	/* Clear complete bit. */
1738*4882a593Smuzhiyun 			vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 			skb = __netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN,
1741*4882a593Smuzhiyun 						 GFP_KERNEL);
1742*4882a593Smuzhiyun 			vp->rx_skbuff[i] = skb;
1743*4882a593Smuzhiyun 			if (skb == NULL)
1744*4882a593Smuzhiyun 				break;			/* Bad news!  */
1745*4882a593Smuzhiyun 
1746*4882a593Smuzhiyun 			skb_reserve(skb, NET_IP_ALIGN);	/* Align IP on 16 byte boundaries */
1747*4882a593Smuzhiyun 			dma = dma_map_single(vp->gendev, skb->data,
1748*4882a593Smuzhiyun 					     PKT_BUF_SZ, DMA_FROM_DEVICE);
1749*4882a593Smuzhiyun 			if (dma_mapping_error(vp->gendev, dma))
1750*4882a593Smuzhiyun 				break;
1751*4882a593Smuzhiyun 			vp->rx_ring[i].addr = cpu_to_le32(dma);
1752*4882a593Smuzhiyun 		}
1753*4882a593Smuzhiyun 		if (i != RX_RING_SIZE) {
1754*4882a593Smuzhiyun 			pr_emerg("%s: no memory for rx ring\n", dev->name);
1755*4882a593Smuzhiyun 			retval = -ENOMEM;
1756*4882a593Smuzhiyun 			goto err_free_skb;
1757*4882a593Smuzhiyun 		}
1758*4882a593Smuzhiyun 		/* Wrap the ring. */
1759*4882a593Smuzhiyun 		vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
1760*4882a593Smuzhiyun 	}
1761*4882a593Smuzhiyun 
1762*4882a593Smuzhiyun 	retval = vortex_up(dev);
1763*4882a593Smuzhiyun 	if (!retval)
1764*4882a593Smuzhiyun 		goto out;
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun err_free_skb:
1767*4882a593Smuzhiyun 	for (i = 0; i < RX_RING_SIZE; i++) {
1768*4882a593Smuzhiyun 		if (vp->rx_skbuff[i]) {
1769*4882a593Smuzhiyun 			dev_kfree_skb(vp->rx_skbuff[i]);
1770*4882a593Smuzhiyun 			vp->rx_skbuff[i] = NULL;
1771*4882a593Smuzhiyun 		}
1772*4882a593Smuzhiyun 	}
1773*4882a593Smuzhiyun 	free_irq(dev->irq, dev);
1774*4882a593Smuzhiyun err:
1775*4882a593Smuzhiyun 	if (vortex_debug > 1)
1776*4882a593Smuzhiyun 		pr_err("%s: vortex_open() fails: returning %d\n", dev->name, retval);
1777*4882a593Smuzhiyun out:
1778*4882a593Smuzhiyun 	return retval;
1779*4882a593Smuzhiyun }
1780*4882a593Smuzhiyun 
1781*4882a593Smuzhiyun static void
vortex_timer(struct timer_list * t)1782*4882a593Smuzhiyun vortex_timer(struct timer_list *t)
1783*4882a593Smuzhiyun {
1784*4882a593Smuzhiyun 	struct vortex_private *vp = from_timer(vp, t, timer);
1785*4882a593Smuzhiyun 	struct net_device *dev = vp->mii.dev;
1786*4882a593Smuzhiyun 	void __iomem *ioaddr = vp->ioaddr;
1787*4882a593Smuzhiyun 	int next_tick = 60*HZ;
1788*4882a593Smuzhiyun 	int ok = 0;
1789*4882a593Smuzhiyun 	int media_status;
1790*4882a593Smuzhiyun 
1791*4882a593Smuzhiyun 	if (vortex_debug > 2) {
1792*4882a593Smuzhiyun 		pr_debug("%s: Media selection timer tick happened, %s.\n",
1793*4882a593Smuzhiyun 			   dev->name, media_tbl[dev->if_port].name);
1794*4882a593Smuzhiyun 		pr_debug("dev->watchdog_timeo=%d\n", dev->watchdog_timeo);
1795*4882a593Smuzhiyun 	}
1796*4882a593Smuzhiyun 
1797*4882a593Smuzhiyun 	media_status = window_read16(vp, 4, Wn4_Media);
1798*4882a593Smuzhiyun 	switch (dev->if_port) {
1799*4882a593Smuzhiyun 	case XCVR_10baseT:  case XCVR_100baseTx:  case XCVR_100baseFx:
1800*4882a593Smuzhiyun 		if (media_status & Media_LnkBeat) {
1801*4882a593Smuzhiyun 			netif_carrier_on(dev);
1802*4882a593Smuzhiyun 			ok = 1;
1803*4882a593Smuzhiyun 			if (vortex_debug > 1)
1804*4882a593Smuzhiyun 				pr_debug("%s: Media %s has link beat, %x.\n",
1805*4882a593Smuzhiyun 					   dev->name, media_tbl[dev->if_port].name, media_status);
1806*4882a593Smuzhiyun 		} else {
1807*4882a593Smuzhiyun 			netif_carrier_off(dev);
1808*4882a593Smuzhiyun 			if (vortex_debug > 1) {
1809*4882a593Smuzhiyun 				pr_debug("%s: Media %s has no link beat, %x.\n",
1810*4882a593Smuzhiyun 					   dev->name, media_tbl[dev->if_port].name, media_status);
1811*4882a593Smuzhiyun 			}
1812*4882a593Smuzhiyun 		}
1813*4882a593Smuzhiyun 		break;
1814*4882a593Smuzhiyun 	case XCVR_MII: case XCVR_NWAY:
1815*4882a593Smuzhiyun 		{
1816*4882a593Smuzhiyun 			ok = 1;
1817*4882a593Smuzhiyun 			vortex_check_media(dev, 0);
1818*4882a593Smuzhiyun 		}
1819*4882a593Smuzhiyun 		break;
1820*4882a593Smuzhiyun 	  default:					/* Other media types handled by Tx timeouts. */
1821*4882a593Smuzhiyun 		if (vortex_debug > 1)
1822*4882a593Smuzhiyun 		  pr_debug("%s: Media %s has no indication, %x.\n",
1823*4882a593Smuzhiyun 				 dev->name, media_tbl[dev->if_port].name, media_status);
1824*4882a593Smuzhiyun 		ok = 1;
1825*4882a593Smuzhiyun 	}
1826*4882a593Smuzhiyun 
1827*4882a593Smuzhiyun 	if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev))
1828*4882a593Smuzhiyun 		next_tick = 5*HZ;
1829*4882a593Smuzhiyun 
1830*4882a593Smuzhiyun 	if (vp->medialock)
1831*4882a593Smuzhiyun 		goto leave_media_alone;
1832*4882a593Smuzhiyun 
1833*4882a593Smuzhiyun 	if (!ok) {
1834*4882a593Smuzhiyun 		unsigned int config;
1835*4882a593Smuzhiyun 
1836*4882a593Smuzhiyun 		spin_lock_irq(&vp->lock);
1837*4882a593Smuzhiyun 
1838*4882a593Smuzhiyun 		do {
1839*4882a593Smuzhiyun 			dev->if_port = media_tbl[dev->if_port].next;
1840*4882a593Smuzhiyun 		} while ( ! (vp->available_media & media_tbl[dev->if_port].mask));
1841*4882a593Smuzhiyun 		if (dev->if_port == XCVR_Default) { /* Go back to default. */
1842*4882a593Smuzhiyun 		  dev->if_port = vp->default_media;
1843*4882a593Smuzhiyun 		  if (vortex_debug > 1)
1844*4882a593Smuzhiyun 			pr_debug("%s: Media selection failing, using default %s port.\n",
1845*4882a593Smuzhiyun 				   dev->name, media_tbl[dev->if_port].name);
1846*4882a593Smuzhiyun 		} else {
1847*4882a593Smuzhiyun 			if (vortex_debug > 1)
1848*4882a593Smuzhiyun 				pr_debug("%s: Media selection failed, now trying %s port.\n",
1849*4882a593Smuzhiyun 					   dev->name, media_tbl[dev->if_port].name);
1850*4882a593Smuzhiyun 			next_tick = media_tbl[dev->if_port].wait;
1851*4882a593Smuzhiyun 		}
1852*4882a593Smuzhiyun 		window_write16(vp,
1853*4882a593Smuzhiyun 			       (media_status & ~(Media_10TP|Media_SQE)) |
1854*4882a593Smuzhiyun 			       media_tbl[dev->if_port].media_bits,
1855*4882a593Smuzhiyun 			       4, Wn4_Media);
1856*4882a593Smuzhiyun 
1857*4882a593Smuzhiyun 		config = window_read32(vp, 3, Wn3_Config);
1858*4882a593Smuzhiyun 		config = BFINS(config, dev->if_port, 20, 4);
1859*4882a593Smuzhiyun 		window_write32(vp, config, 3, Wn3_Config);
1860*4882a593Smuzhiyun 
1861*4882a593Smuzhiyun 		iowrite16(dev->if_port == XCVR_10base2 ? StartCoax : StopCoax,
1862*4882a593Smuzhiyun 			 ioaddr + EL3_CMD);
1863*4882a593Smuzhiyun 		if (vortex_debug > 1)
1864*4882a593Smuzhiyun 			pr_debug("wrote 0x%08x to Wn3_Config\n", config);
1865*4882a593Smuzhiyun 		/* AKPM: FIXME: Should reset Rx & Tx here.  P60 of 3c90xc.pdf */
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 		spin_unlock_irq(&vp->lock);
1868*4882a593Smuzhiyun 	}
1869*4882a593Smuzhiyun 
1870*4882a593Smuzhiyun leave_media_alone:
1871*4882a593Smuzhiyun 	if (vortex_debug > 2)
1872*4882a593Smuzhiyun 	  pr_debug("%s: Media selection timer finished, %s.\n",
1873*4882a593Smuzhiyun 			 dev->name, media_tbl[dev->if_port].name);
1874*4882a593Smuzhiyun 
1875*4882a593Smuzhiyun 	mod_timer(&vp->timer, RUN_AT(next_tick));
1876*4882a593Smuzhiyun 	if (vp->deferred)
1877*4882a593Smuzhiyun 		iowrite16(FakeIntr, ioaddr + EL3_CMD);
1878*4882a593Smuzhiyun }
1879*4882a593Smuzhiyun 
vortex_tx_timeout(struct net_device * dev,unsigned int txqueue)1880*4882a593Smuzhiyun static void vortex_tx_timeout(struct net_device *dev, unsigned int txqueue)
1881*4882a593Smuzhiyun {
1882*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
1883*4882a593Smuzhiyun 	void __iomem *ioaddr = vp->ioaddr;
1884*4882a593Smuzhiyun 
1885*4882a593Smuzhiyun 	pr_err("%s: transmit timed out, tx_status %2.2x status %4.4x.\n",
1886*4882a593Smuzhiyun 		   dev->name, ioread8(ioaddr + TxStatus),
1887*4882a593Smuzhiyun 		   ioread16(ioaddr + EL3_STATUS));
1888*4882a593Smuzhiyun 	pr_err("  diagnostics: net %04x media %04x dma %08x fifo %04x\n",
1889*4882a593Smuzhiyun 			window_read16(vp, 4, Wn4_NetDiag),
1890*4882a593Smuzhiyun 			window_read16(vp, 4, Wn4_Media),
1891*4882a593Smuzhiyun 			ioread32(ioaddr + PktStatus),
1892*4882a593Smuzhiyun 			window_read16(vp, 4, Wn4_FIFODiag));
1893*4882a593Smuzhiyun 	/* Slight code bloat to be user friendly. */
1894*4882a593Smuzhiyun 	if ((ioread8(ioaddr + TxStatus) & 0x88) == 0x88)
1895*4882a593Smuzhiyun 		pr_err("%s: Transmitter encountered 16 collisions --"
1896*4882a593Smuzhiyun 			   " network cable problem?\n", dev->name);
1897*4882a593Smuzhiyun 	if (ioread16(ioaddr + EL3_STATUS) & IntLatch) {
1898*4882a593Smuzhiyun 		pr_err("%s: Interrupt posted but not delivered --"
1899*4882a593Smuzhiyun 			   " IRQ blocked by another device?\n", dev->name);
1900*4882a593Smuzhiyun 		/* Bad idea here.. but we might as well handle a few events. */
1901*4882a593Smuzhiyun 		vortex_boomerang_interrupt(dev->irq, dev);
1902*4882a593Smuzhiyun 	}
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 	if (vortex_debug > 0)
1905*4882a593Smuzhiyun 		dump_tx_ring(dev);
1906*4882a593Smuzhiyun 
1907*4882a593Smuzhiyun 	issue_and_wait(dev, TxReset);
1908*4882a593Smuzhiyun 
1909*4882a593Smuzhiyun 	dev->stats.tx_errors++;
1910*4882a593Smuzhiyun 	if (vp->full_bus_master_tx) {
1911*4882a593Smuzhiyun 		pr_debug("%s: Resetting the Tx ring pointer.\n", dev->name);
1912*4882a593Smuzhiyun 		if (vp->cur_tx - vp->dirty_tx > 0  &&  ioread32(ioaddr + DownListPtr) == 0)
1913*4882a593Smuzhiyun 			iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
1914*4882a593Smuzhiyun 				 ioaddr + DownListPtr);
1915*4882a593Smuzhiyun 		if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE) {
1916*4882a593Smuzhiyun 			netif_wake_queue (dev);
1917*4882a593Smuzhiyun 			netdev_reset_queue (dev);
1918*4882a593Smuzhiyun 		}
1919*4882a593Smuzhiyun 		if (vp->drv_flags & IS_BOOMERANG)
1920*4882a593Smuzhiyun 			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
1921*4882a593Smuzhiyun 		iowrite16(DownUnstall, ioaddr + EL3_CMD);
1922*4882a593Smuzhiyun 	} else {
1923*4882a593Smuzhiyun 		dev->stats.tx_dropped++;
1924*4882a593Smuzhiyun 		netif_wake_queue(dev);
1925*4882a593Smuzhiyun 		netdev_reset_queue(dev);
1926*4882a593Smuzhiyun 	}
1927*4882a593Smuzhiyun 	/* Issue Tx Enable */
1928*4882a593Smuzhiyun 	iowrite16(TxEnable, ioaddr + EL3_CMD);
1929*4882a593Smuzhiyun 	netif_trans_update(dev); /* prevent tx timeout */
1930*4882a593Smuzhiyun }
1931*4882a593Smuzhiyun 
1932*4882a593Smuzhiyun /*
1933*4882a593Smuzhiyun  * Handle uncommon interrupt sources.  This is a separate routine to minimize
1934*4882a593Smuzhiyun  * the cache impact.
1935*4882a593Smuzhiyun  */
1936*4882a593Smuzhiyun static void
vortex_error(struct net_device * dev,int status)1937*4882a593Smuzhiyun vortex_error(struct net_device *dev, int status)
1938*4882a593Smuzhiyun {
1939*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
1940*4882a593Smuzhiyun 	void __iomem *ioaddr = vp->ioaddr;
1941*4882a593Smuzhiyun 	int do_tx_reset = 0, reset_mask = 0;
1942*4882a593Smuzhiyun 	unsigned char tx_status = 0;
1943*4882a593Smuzhiyun 
1944*4882a593Smuzhiyun 	if (vortex_debug > 2) {
1945*4882a593Smuzhiyun 		pr_err("%s: vortex_error(), status=0x%x\n", dev->name, status);
1946*4882a593Smuzhiyun 	}
1947*4882a593Smuzhiyun 
1948*4882a593Smuzhiyun 	if (status & TxComplete) {			/* Really "TxError" for us. */
1949*4882a593Smuzhiyun 		tx_status = ioread8(ioaddr + TxStatus);
1950*4882a593Smuzhiyun 		/* Presumably a tx-timeout. We must merely re-enable. */
1951*4882a593Smuzhiyun 		if (vortex_debug > 2 ||
1952*4882a593Smuzhiyun 		    (tx_status != 0x88 && vortex_debug > 0)) {
1953*4882a593Smuzhiyun 			pr_err("%s: Transmit error, Tx status register %2.2x.\n",
1954*4882a593Smuzhiyun 				   dev->name, tx_status);
1955*4882a593Smuzhiyun 			if (tx_status == 0x82) {
1956*4882a593Smuzhiyun 				pr_err("Probably a duplex mismatch.  See "
1957*4882a593Smuzhiyun 						"Documentation/networking/device_drivers/ethernet/3com/vortex.rst\n");
1958*4882a593Smuzhiyun 			}
1959*4882a593Smuzhiyun 			dump_tx_ring(dev);
1960*4882a593Smuzhiyun 		}
1961*4882a593Smuzhiyun 		if (tx_status & 0x14)  dev->stats.tx_fifo_errors++;
1962*4882a593Smuzhiyun 		if (tx_status & 0x38)  dev->stats.tx_aborted_errors++;
1963*4882a593Smuzhiyun 		if (tx_status & 0x08)  vp->xstats.tx_max_collisions++;
1964*4882a593Smuzhiyun 		iowrite8(0, ioaddr + TxStatus);
1965*4882a593Smuzhiyun 		if (tx_status & 0x30) {			/* txJabber or txUnderrun */
1966*4882a593Smuzhiyun 			do_tx_reset = 1;
1967*4882a593Smuzhiyun 		} else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET))  {	/* maxCollisions */
1968*4882a593Smuzhiyun 			do_tx_reset = 1;
1969*4882a593Smuzhiyun 			reset_mask = 0x0108;		/* Reset interface logic, but not download logic */
1970*4882a593Smuzhiyun 		} else {				/* Merely re-enable the transmitter. */
1971*4882a593Smuzhiyun 			iowrite16(TxEnable, ioaddr + EL3_CMD);
1972*4882a593Smuzhiyun 		}
1973*4882a593Smuzhiyun 	}
1974*4882a593Smuzhiyun 
1975*4882a593Smuzhiyun 	if (status & RxEarly)				/* Rx early is unused. */
1976*4882a593Smuzhiyun 		iowrite16(AckIntr | RxEarly, ioaddr + EL3_CMD);
1977*4882a593Smuzhiyun 
1978*4882a593Smuzhiyun 	if (status & StatsFull) {			/* Empty statistics. */
1979*4882a593Smuzhiyun 		static int DoneDidThat;
1980*4882a593Smuzhiyun 		if (vortex_debug > 4)
1981*4882a593Smuzhiyun 			pr_debug("%s: Updating stats.\n", dev->name);
1982*4882a593Smuzhiyun 		update_stats(ioaddr, dev);
1983*4882a593Smuzhiyun 		/* HACK: Disable statistics as an interrupt source. */
1984*4882a593Smuzhiyun 		/* This occurs when we have the wrong media type! */
1985*4882a593Smuzhiyun 		if (DoneDidThat == 0  &&
1986*4882a593Smuzhiyun 			ioread16(ioaddr + EL3_STATUS) & StatsFull) {
1987*4882a593Smuzhiyun 			pr_warn("%s: Updating statistics failed, disabling stats as an interrupt source\n",
1988*4882a593Smuzhiyun 				dev->name);
1989*4882a593Smuzhiyun 			iowrite16(SetIntrEnb |
1990*4882a593Smuzhiyun 				  (window_read16(vp, 5, 10) & ~StatsFull),
1991*4882a593Smuzhiyun 				  ioaddr + EL3_CMD);
1992*4882a593Smuzhiyun 			vp->intr_enable &= ~StatsFull;
1993*4882a593Smuzhiyun 			DoneDidThat++;
1994*4882a593Smuzhiyun 		}
1995*4882a593Smuzhiyun 	}
1996*4882a593Smuzhiyun 	if (status & IntReq) {		/* Restore all interrupt sources.  */
1997*4882a593Smuzhiyun 		iowrite16(vp->status_enable, ioaddr + EL3_CMD);
1998*4882a593Smuzhiyun 		iowrite16(vp->intr_enable, ioaddr + EL3_CMD);
1999*4882a593Smuzhiyun 	}
2000*4882a593Smuzhiyun 	if (status & HostError) {
2001*4882a593Smuzhiyun 		u16 fifo_diag;
2002*4882a593Smuzhiyun 		fifo_diag = window_read16(vp, 4, Wn4_FIFODiag);
2003*4882a593Smuzhiyun 		pr_err("%s: Host error, FIFO diagnostic register %4.4x.\n",
2004*4882a593Smuzhiyun 			   dev->name, fifo_diag);
2005*4882a593Smuzhiyun 		/* Adapter failure requires Tx/Rx reset and reinit. */
2006*4882a593Smuzhiyun 		if (vp->full_bus_master_tx) {
2007*4882a593Smuzhiyun 			int bus_status = ioread32(ioaddr + PktStatus);
2008*4882a593Smuzhiyun 			/* 0x80000000 PCI master abort. */
2009*4882a593Smuzhiyun 			/* 0x40000000 PCI target abort. */
2010*4882a593Smuzhiyun 			if (vortex_debug)
2011*4882a593Smuzhiyun 				pr_err("%s: PCI bus error, bus status %8.8x\n", dev->name, bus_status);
2012*4882a593Smuzhiyun 
2013*4882a593Smuzhiyun 			/* In this case, blow the card away */
2014*4882a593Smuzhiyun 			/* Must not enter D3 or we can't legally issue the reset! */
2015*4882a593Smuzhiyun 			vortex_down(dev, 0);
2016*4882a593Smuzhiyun 			issue_and_wait(dev, TotalReset | 0xff);
2017*4882a593Smuzhiyun 			vortex_up(dev);		/* AKPM: bug.  vortex_up() assumes that the rx ring is full. It may not be. */
2018*4882a593Smuzhiyun 		} else if (fifo_diag & 0x0400)
2019*4882a593Smuzhiyun 			do_tx_reset = 1;
2020*4882a593Smuzhiyun 		if (fifo_diag & 0x3000) {
2021*4882a593Smuzhiyun 			/* Reset Rx fifo and upload logic */
2022*4882a593Smuzhiyun 			issue_and_wait(dev, RxReset|0x07);
2023*4882a593Smuzhiyun 			/* Set the Rx filter to the current state. */
2024*4882a593Smuzhiyun 			set_rx_mode(dev);
2025*4882a593Smuzhiyun 			/* enable 802.1q VLAN tagged frames */
2026*4882a593Smuzhiyun 			set_8021q_mode(dev, 1);
2027*4882a593Smuzhiyun 			iowrite16(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
2028*4882a593Smuzhiyun 			iowrite16(AckIntr | HostError, ioaddr + EL3_CMD);
2029*4882a593Smuzhiyun 		}
2030*4882a593Smuzhiyun 	}
2031*4882a593Smuzhiyun 
2032*4882a593Smuzhiyun 	if (do_tx_reset) {
2033*4882a593Smuzhiyun 		issue_and_wait(dev, TxReset|reset_mask);
2034*4882a593Smuzhiyun 		iowrite16(TxEnable, ioaddr + EL3_CMD);
2035*4882a593Smuzhiyun 		if (!vp->full_bus_master_tx)
2036*4882a593Smuzhiyun 			netif_wake_queue(dev);
2037*4882a593Smuzhiyun 	}
2038*4882a593Smuzhiyun }
2039*4882a593Smuzhiyun 
2040*4882a593Smuzhiyun static netdev_tx_t
vortex_start_xmit(struct sk_buff * skb,struct net_device * dev)2041*4882a593Smuzhiyun vortex_start_xmit(struct sk_buff *skb, struct net_device *dev)
2042*4882a593Smuzhiyun {
2043*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
2044*4882a593Smuzhiyun 	void __iomem *ioaddr = vp->ioaddr;
2045*4882a593Smuzhiyun 	int skblen = skb->len;
2046*4882a593Smuzhiyun 
2047*4882a593Smuzhiyun 	/* Put out the doubleword header... */
2048*4882a593Smuzhiyun 	iowrite32(skb->len, ioaddr + TX_FIFO);
2049*4882a593Smuzhiyun 	if (vp->bus_master) {
2050*4882a593Smuzhiyun 		/* Set the bus-master controller to transfer the packet. */
2051*4882a593Smuzhiyun 		int len = (skb->len + 3) & ~3;
2052*4882a593Smuzhiyun 		vp->tx_skb_dma = dma_map_single(vp->gendev, skb->data, len,
2053*4882a593Smuzhiyun 						DMA_TO_DEVICE);
2054*4882a593Smuzhiyun 		if (dma_mapping_error(vp->gendev, vp->tx_skb_dma)) {
2055*4882a593Smuzhiyun 			dev_kfree_skb_any(skb);
2056*4882a593Smuzhiyun 			dev->stats.tx_dropped++;
2057*4882a593Smuzhiyun 			return NETDEV_TX_OK;
2058*4882a593Smuzhiyun 		}
2059*4882a593Smuzhiyun 
2060*4882a593Smuzhiyun 		spin_lock_irq(&vp->window_lock);
2061*4882a593Smuzhiyun 		window_set(vp, 7);
2062*4882a593Smuzhiyun 		iowrite32(vp->tx_skb_dma, ioaddr + Wn7_MasterAddr);
2063*4882a593Smuzhiyun 		iowrite16(len, ioaddr + Wn7_MasterLen);
2064*4882a593Smuzhiyun 		spin_unlock_irq(&vp->window_lock);
2065*4882a593Smuzhiyun 		vp->tx_skb = skb;
2066*4882a593Smuzhiyun 		skb_tx_timestamp(skb);
2067*4882a593Smuzhiyun 		iowrite16(StartDMADown, ioaddr + EL3_CMD);
2068*4882a593Smuzhiyun 		/* netif_wake_queue() will be called at the DMADone interrupt. */
2069*4882a593Smuzhiyun 	} else {
2070*4882a593Smuzhiyun 		/* ... and the packet rounded to a doubleword. */
2071*4882a593Smuzhiyun 		skb_tx_timestamp(skb);
2072*4882a593Smuzhiyun 		iowrite32_rep(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
2073*4882a593Smuzhiyun 		dev_consume_skb_any (skb);
2074*4882a593Smuzhiyun 		if (ioread16(ioaddr + TxFree) > 1536) {
2075*4882a593Smuzhiyun 			netif_start_queue (dev);	/* AKPM: redundant? */
2076*4882a593Smuzhiyun 		} else {
2077*4882a593Smuzhiyun 			/* Interrupt us when the FIFO has room for max-sized packet. */
2078*4882a593Smuzhiyun 			netif_stop_queue(dev);
2079*4882a593Smuzhiyun 			iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
2080*4882a593Smuzhiyun 		}
2081*4882a593Smuzhiyun 	}
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 	netdev_sent_queue(dev, skblen);
2084*4882a593Smuzhiyun 
2085*4882a593Smuzhiyun 	/* Clear the Tx status stack. */
2086*4882a593Smuzhiyun 	{
2087*4882a593Smuzhiyun 		int tx_status;
2088*4882a593Smuzhiyun 		int i = 32;
2089*4882a593Smuzhiyun 
2090*4882a593Smuzhiyun 		while (--i > 0	&&	(tx_status = ioread8(ioaddr + TxStatus)) > 0) {
2091*4882a593Smuzhiyun 			if (tx_status & 0x3C) {		/* A Tx-disabling error occurred.  */
2092*4882a593Smuzhiyun 				if (vortex_debug > 2)
2093*4882a593Smuzhiyun 				  pr_debug("%s: Tx error, status %2.2x.\n",
2094*4882a593Smuzhiyun 						 dev->name, tx_status);
2095*4882a593Smuzhiyun 				if (tx_status & 0x04) dev->stats.tx_fifo_errors++;
2096*4882a593Smuzhiyun 				if (tx_status & 0x38) dev->stats.tx_aborted_errors++;
2097*4882a593Smuzhiyun 				if (tx_status & 0x30) {
2098*4882a593Smuzhiyun 					issue_and_wait(dev, TxReset);
2099*4882a593Smuzhiyun 				}
2100*4882a593Smuzhiyun 				iowrite16(TxEnable, ioaddr + EL3_CMD);
2101*4882a593Smuzhiyun 			}
2102*4882a593Smuzhiyun 			iowrite8(0x00, ioaddr + TxStatus); /* Pop the status stack. */
2103*4882a593Smuzhiyun 		}
2104*4882a593Smuzhiyun 	}
2105*4882a593Smuzhiyun 	return NETDEV_TX_OK;
2106*4882a593Smuzhiyun }
2107*4882a593Smuzhiyun 
2108*4882a593Smuzhiyun static netdev_tx_t
boomerang_start_xmit(struct sk_buff * skb,struct net_device * dev)2109*4882a593Smuzhiyun boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
2110*4882a593Smuzhiyun {
2111*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
2112*4882a593Smuzhiyun 	void __iomem *ioaddr = vp->ioaddr;
2113*4882a593Smuzhiyun 	/* Calculate the next Tx descriptor entry. */
2114*4882a593Smuzhiyun 	int entry = vp->cur_tx % TX_RING_SIZE;
2115*4882a593Smuzhiyun 	int skblen = skb->len;
2116*4882a593Smuzhiyun 	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
2117*4882a593Smuzhiyun 	unsigned long flags;
2118*4882a593Smuzhiyun 	dma_addr_t dma_addr;
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 	if (vortex_debug > 6) {
2121*4882a593Smuzhiyun 		pr_debug("boomerang_start_xmit()\n");
2122*4882a593Smuzhiyun 		pr_debug("%s: Trying to send a packet, Tx index %d.\n",
2123*4882a593Smuzhiyun 			   dev->name, vp->cur_tx);
2124*4882a593Smuzhiyun 	}
2125*4882a593Smuzhiyun 
2126*4882a593Smuzhiyun 	/*
2127*4882a593Smuzhiyun 	 * We can't allow a recursion from our interrupt handler back into the
2128*4882a593Smuzhiyun 	 * tx routine, as they take the same spin lock, and that causes
2129*4882a593Smuzhiyun 	 * deadlock.  Just return NETDEV_TX_BUSY and let the stack try again in
2130*4882a593Smuzhiyun 	 * a bit
2131*4882a593Smuzhiyun 	 */
2132*4882a593Smuzhiyun 	if (vp->handling_irq)
2133*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;
2134*4882a593Smuzhiyun 
2135*4882a593Smuzhiyun 	if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
2136*4882a593Smuzhiyun 		if (vortex_debug > 0)
2137*4882a593Smuzhiyun 			pr_warn("%s: BUG! Tx Ring full, refusing to send buffer\n",
2138*4882a593Smuzhiyun 				dev->name);
2139*4882a593Smuzhiyun 		netif_stop_queue(dev);
2140*4882a593Smuzhiyun 		return NETDEV_TX_BUSY;
2141*4882a593Smuzhiyun 	}
2142*4882a593Smuzhiyun 
2143*4882a593Smuzhiyun 	vp->tx_skbuff[entry] = skb;
2144*4882a593Smuzhiyun 
2145*4882a593Smuzhiyun 	vp->tx_ring[entry].next = 0;
2146*4882a593Smuzhiyun #if DO_ZEROCOPY
2147*4882a593Smuzhiyun 	if (skb->ip_summed != CHECKSUM_PARTIAL)
2148*4882a593Smuzhiyun 			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2149*4882a593Smuzhiyun 	else
2150*4882a593Smuzhiyun 			vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum | AddUDPChksum);
2151*4882a593Smuzhiyun 
2152*4882a593Smuzhiyun 	if (!skb_shinfo(skb)->nr_frags) {
2153*4882a593Smuzhiyun 		dma_addr = dma_map_single(vp->gendev, skb->data, skb->len,
2154*4882a593Smuzhiyun 					  DMA_TO_DEVICE);
2155*4882a593Smuzhiyun 		if (dma_mapping_error(vp->gendev, dma_addr))
2156*4882a593Smuzhiyun 			goto out_dma_err;
2157*4882a593Smuzhiyun 
2158*4882a593Smuzhiyun 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
2159*4882a593Smuzhiyun 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
2160*4882a593Smuzhiyun 	} else {
2161*4882a593Smuzhiyun 		int i;
2162*4882a593Smuzhiyun 
2163*4882a593Smuzhiyun 		dma_addr = dma_map_single(vp->gendev, skb->data,
2164*4882a593Smuzhiyun 					  skb_headlen(skb), DMA_TO_DEVICE);
2165*4882a593Smuzhiyun 		if (dma_mapping_error(vp->gendev, dma_addr))
2166*4882a593Smuzhiyun 			goto out_dma_err;
2167*4882a593Smuzhiyun 
2168*4882a593Smuzhiyun 		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(dma_addr);
2169*4882a593Smuzhiyun 		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
2170*4882a593Smuzhiyun 
2171*4882a593Smuzhiyun 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2172*4882a593Smuzhiyun 			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2173*4882a593Smuzhiyun 
2174*4882a593Smuzhiyun 			dma_addr = skb_frag_dma_map(vp->gendev, frag,
2175*4882a593Smuzhiyun 						    0,
2176*4882a593Smuzhiyun 						    skb_frag_size(frag),
2177*4882a593Smuzhiyun 						    DMA_TO_DEVICE);
2178*4882a593Smuzhiyun 			if (dma_mapping_error(vp->gendev, dma_addr)) {
2179*4882a593Smuzhiyun 				for(i = i-1; i >= 0; i--)
2180*4882a593Smuzhiyun 					dma_unmap_page(vp->gendev,
2181*4882a593Smuzhiyun 						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].addr),
2182*4882a593Smuzhiyun 						       le32_to_cpu(vp->tx_ring[entry].frag[i+1].length),
2183*4882a593Smuzhiyun 						       DMA_TO_DEVICE);
2184*4882a593Smuzhiyun 
2185*4882a593Smuzhiyun 				dma_unmap_single(vp->gendev,
2186*4882a593Smuzhiyun 						 le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
2187*4882a593Smuzhiyun 						 le32_to_cpu(vp->tx_ring[entry].frag[0].length),
2188*4882a593Smuzhiyun 						 DMA_TO_DEVICE);
2189*4882a593Smuzhiyun 
2190*4882a593Smuzhiyun 				goto out_dma_err;
2191*4882a593Smuzhiyun 			}
2192*4882a593Smuzhiyun 
2193*4882a593Smuzhiyun 			vp->tx_ring[entry].frag[i+1].addr =
2194*4882a593Smuzhiyun 						cpu_to_le32(dma_addr);
2195*4882a593Smuzhiyun 
2196*4882a593Smuzhiyun 			if (i == skb_shinfo(skb)->nr_frags-1)
2197*4882a593Smuzhiyun 					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag)|LAST_FRAG);
2198*4882a593Smuzhiyun 			else
2199*4882a593Smuzhiyun 					vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(skb_frag_size(frag));
2200*4882a593Smuzhiyun 		}
2201*4882a593Smuzhiyun 	}
2202*4882a593Smuzhiyun #else
2203*4882a593Smuzhiyun 	dma_addr = dma_map_single(vp->gendev, skb->data, skb->len, DMA_TO_DEVICE);
2204*4882a593Smuzhiyun 	if (dma_mapping_error(vp->gendev, dma_addr))
2205*4882a593Smuzhiyun 		goto out_dma_err;
2206*4882a593Smuzhiyun 	vp->tx_ring[entry].addr = cpu_to_le32(dma_addr);
2207*4882a593Smuzhiyun 	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
2208*4882a593Smuzhiyun 	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
2209*4882a593Smuzhiyun #endif
2210*4882a593Smuzhiyun 
2211*4882a593Smuzhiyun 	spin_lock_irqsave(&vp->lock, flags);
2212*4882a593Smuzhiyun 	/* Wait for the stall to complete. */
2213*4882a593Smuzhiyun 	issue_and_wait(dev, DownStall);
2214*4882a593Smuzhiyun 	prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
2215*4882a593Smuzhiyun 	if (ioread32(ioaddr + DownListPtr) == 0) {
2216*4882a593Smuzhiyun 		iowrite32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
2217*4882a593Smuzhiyun 		vp->queued_packet++;
2218*4882a593Smuzhiyun 	}
2219*4882a593Smuzhiyun 
2220*4882a593Smuzhiyun 	vp->cur_tx++;
2221*4882a593Smuzhiyun 	netdev_sent_queue(dev, skblen);
2222*4882a593Smuzhiyun 
2223*4882a593Smuzhiyun 	if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
2224*4882a593Smuzhiyun 		netif_stop_queue (dev);
2225*4882a593Smuzhiyun 	} else {					/* Clear previous interrupt enable. */
2226*4882a593Smuzhiyun #if defined(tx_interrupt_mitigation)
2227*4882a593Smuzhiyun 		/* Dubious. If in boomeang_interrupt "faster" cyclone ifdef
2228*4882a593Smuzhiyun 		 * were selected, this would corrupt DN_COMPLETE. No?
2229*4882a593Smuzhiyun 		 */
2230*4882a593Smuzhiyun 		prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
2231*4882a593Smuzhiyun #endif
2232*4882a593Smuzhiyun 	}
2233*4882a593Smuzhiyun 	skb_tx_timestamp(skb);
2234*4882a593Smuzhiyun 	iowrite16(DownUnstall, ioaddr + EL3_CMD);
2235*4882a593Smuzhiyun 	spin_unlock_irqrestore(&vp->lock, flags);
2236*4882a593Smuzhiyun out:
2237*4882a593Smuzhiyun 	return NETDEV_TX_OK;
2238*4882a593Smuzhiyun out_dma_err:
2239*4882a593Smuzhiyun 	dev_err(vp->gendev, "Error mapping dma buffer\n");
2240*4882a593Smuzhiyun 	goto out;
2241*4882a593Smuzhiyun }
2242*4882a593Smuzhiyun 
2243*4882a593Smuzhiyun /* The interrupt handler does all of the Rx thread work and cleans up
2244*4882a593Smuzhiyun    after the Tx thread. */
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun /*
2247*4882a593Smuzhiyun  * This is the ISR for the vortex series chips.
2248*4882a593Smuzhiyun  * full_bus_master_tx == 0 && full_bus_master_rx == 0
2249*4882a593Smuzhiyun  */
2250*4882a593Smuzhiyun 
2251*4882a593Smuzhiyun static irqreturn_t
_vortex_interrupt(int irq,struct net_device * dev)2252*4882a593Smuzhiyun _vortex_interrupt(int irq, struct net_device *dev)
2253*4882a593Smuzhiyun {
2254*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
2255*4882a593Smuzhiyun 	void __iomem *ioaddr;
2256*4882a593Smuzhiyun 	int status;
2257*4882a593Smuzhiyun 	int work_done = max_interrupt_work;
2258*4882a593Smuzhiyun 	int handled = 0;
2259*4882a593Smuzhiyun 	unsigned int bytes_compl = 0, pkts_compl = 0;
2260*4882a593Smuzhiyun 
2261*4882a593Smuzhiyun 	ioaddr = vp->ioaddr;
2262*4882a593Smuzhiyun 
2263*4882a593Smuzhiyun 	status = ioread16(ioaddr + EL3_STATUS);
2264*4882a593Smuzhiyun 
2265*4882a593Smuzhiyun 	if (vortex_debug > 6)
2266*4882a593Smuzhiyun 		pr_debug("vortex_interrupt(). status=0x%4x\n", status);
2267*4882a593Smuzhiyun 
2268*4882a593Smuzhiyun 	if ((status & IntLatch) == 0)
2269*4882a593Smuzhiyun 		goto handler_exit;		/* No interrupt: shared IRQs cause this */
2270*4882a593Smuzhiyun 	handled = 1;
2271*4882a593Smuzhiyun 
2272*4882a593Smuzhiyun 	if (status & IntReq) {
2273*4882a593Smuzhiyun 		status |= vp->deferred;
2274*4882a593Smuzhiyun 		vp->deferred = 0;
2275*4882a593Smuzhiyun 	}
2276*4882a593Smuzhiyun 
2277*4882a593Smuzhiyun 	if (status == 0xffff)		/* h/w no longer present (hotplug)? */
2278*4882a593Smuzhiyun 		goto handler_exit;
2279*4882a593Smuzhiyun 
2280*4882a593Smuzhiyun 	if (vortex_debug > 4)
2281*4882a593Smuzhiyun 		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
2282*4882a593Smuzhiyun 			   dev->name, status, ioread8(ioaddr + Timer));
2283*4882a593Smuzhiyun 
2284*4882a593Smuzhiyun 	spin_lock(&vp->window_lock);
2285*4882a593Smuzhiyun 	window_set(vp, 7);
2286*4882a593Smuzhiyun 
2287*4882a593Smuzhiyun 	do {
2288*4882a593Smuzhiyun 		if (vortex_debug > 5)
2289*4882a593Smuzhiyun 				pr_debug("%s: In interrupt loop, status %4.4x.\n",
2290*4882a593Smuzhiyun 					   dev->name, status);
2291*4882a593Smuzhiyun 		if (status & RxComplete)
2292*4882a593Smuzhiyun 			vortex_rx(dev);
2293*4882a593Smuzhiyun 
2294*4882a593Smuzhiyun 		if (status & TxAvailable) {
2295*4882a593Smuzhiyun 			if (vortex_debug > 5)
2296*4882a593Smuzhiyun 				pr_debug("	TX room bit was handled.\n");
2297*4882a593Smuzhiyun 			/* There's room in the FIFO for a full-sized packet. */
2298*4882a593Smuzhiyun 			iowrite16(AckIntr | TxAvailable, ioaddr + EL3_CMD);
2299*4882a593Smuzhiyun 			netif_wake_queue (dev);
2300*4882a593Smuzhiyun 		}
2301*4882a593Smuzhiyun 
2302*4882a593Smuzhiyun 		if (status & DMADone) {
2303*4882a593Smuzhiyun 			if (ioread16(ioaddr + Wn7_MasterStatus) & 0x1000) {
2304*4882a593Smuzhiyun 				iowrite16(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
2305*4882a593Smuzhiyun 				dma_unmap_single(vp->gendev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, DMA_TO_DEVICE);
2306*4882a593Smuzhiyun 				pkts_compl++;
2307*4882a593Smuzhiyun 				bytes_compl += vp->tx_skb->len;
2308*4882a593Smuzhiyun 				dev_consume_skb_irq(vp->tx_skb); /* Release the transferred buffer */
2309*4882a593Smuzhiyun 				if (ioread16(ioaddr + TxFree) > 1536) {
2310*4882a593Smuzhiyun 					/*
2311*4882a593Smuzhiyun 					 * AKPM: FIXME: I don't think we need this.  If the queue was stopped due to
2312*4882a593Smuzhiyun 					 * insufficient FIFO room, the TxAvailable test will succeed and call
2313*4882a593Smuzhiyun 					 * netif_wake_queue()
2314*4882a593Smuzhiyun 					 */
2315*4882a593Smuzhiyun 					netif_wake_queue(dev);
2316*4882a593Smuzhiyun 				} else { /* Interrupt when FIFO has room for max-sized packet. */
2317*4882a593Smuzhiyun 					iowrite16(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
2318*4882a593Smuzhiyun 					netif_stop_queue(dev);
2319*4882a593Smuzhiyun 				}
2320*4882a593Smuzhiyun 			}
2321*4882a593Smuzhiyun 		}
2322*4882a593Smuzhiyun 		/* Check for all uncommon interrupts at once. */
2323*4882a593Smuzhiyun 		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
2324*4882a593Smuzhiyun 			if (status == 0xffff)
2325*4882a593Smuzhiyun 				break;
2326*4882a593Smuzhiyun 			if (status & RxEarly)
2327*4882a593Smuzhiyun 				vortex_rx(dev);
2328*4882a593Smuzhiyun 			spin_unlock(&vp->window_lock);
2329*4882a593Smuzhiyun 			vortex_error(dev, status);
2330*4882a593Smuzhiyun 			spin_lock(&vp->window_lock);
2331*4882a593Smuzhiyun 			window_set(vp, 7);
2332*4882a593Smuzhiyun 		}
2333*4882a593Smuzhiyun 
2334*4882a593Smuzhiyun 		if (--work_done < 0) {
2335*4882a593Smuzhiyun 			pr_warn("%s: Too much work in interrupt, status %4.4x\n",
2336*4882a593Smuzhiyun 				dev->name, status);
2337*4882a593Smuzhiyun 			/* Disable all pending interrupts. */
2338*4882a593Smuzhiyun 			do {
2339*4882a593Smuzhiyun 				vp->deferred |= status;
2340*4882a593Smuzhiyun 				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
2341*4882a593Smuzhiyun 					 ioaddr + EL3_CMD);
2342*4882a593Smuzhiyun 				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
2343*4882a593Smuzhiyun 			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
2344*4882a593Smuzhiyun 			/* The timer will reenable interrupts. */
2345*4882a593Smuzhiyun 			mod_timer(&vp->timer, jiffies + 1*HZ);
2346*4882a593Smuzhiyun 			break;
2347*4882a593Smuzhiyun 		}
2348*4882a593Smuzhiyun 		/* Acknowledge the IRQ. */
2349*4882a593Smuzhiyun 		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
2350*4882a593Smuzhiyun 	} while ((status = ioread16(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
2351*4882a593Smuzhiyun 
2352*4882a593Smuzhiyun 	netdev_completed_queue(dev, pkts_compl, bytes_compl);
2353*4882a593Smuzhiyun 	spin_unlock(&vp->window_lock);
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun 	if (vortex_debug > 4)
2356*4882a593Smuzhiyun 		pr_debug("%s: exiting interrupt, status %4.4x.\n",
2357*4882a593Smuzhiyun 			   dev->name, status);
2358*4882a593Smuzhiyun handler_exit:
2359*4882a593Smuzhiyun 	return IRQ_RETVAL(handled);
2360*4882a593Smuzhiyun }
2361*4882a593Smuzhiyun 
2362*4882a593Smuzhiyun /*
2363*4882a593Smuzhiyun  * This is the ISR for the boomerang series chips.
2364*4882a593Smuzhiyun  * full_bus_master_tx == 1 && full_bus_master_rx == 1
2365*4882a593Smuzhiyun  */
2366*4882a593Smuzhiyun 
static irqreturn_t
_boomerang_interrupt(int irq, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr;
	int status;
	int work_done = max_interrupt_work;	/* budget: bail out if the chip keeps raising events */
	int handled = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;	/* accumulated for BQL accounting below */

	ioaddr = vp->ioaddr;

	/* Caller (vortex_boomerang_interrupt) holds vp->lock.  This flag is
	 * set while we are inside the handler; NOTE(review): presumably
	 * consulted by the transmit path — confirm against the callers. */
	vp->handling_irq = 1;

	status = ioread16(ioaddr + EL3_STATUS);

	if (vortex_debug > 6)
		pr_debug("boomerang_interrupt. status=0x%4x\n", status);

	if ((status & IntLatch) == 0)
		goto handler_exit;		/* No interrupt: shared IRQs can cause this */
	handled = 1;

	if (status == 0xffff) {		/* h/w no longer present (hotplug)? */
		if (vortex_debug > 1)
			pr_debug("boomerang_interrupt(1): status = 0xffff\n");
		goto handler_exit;
	}

	/* IntReq means the timer re-raised events we masked off earlier
	 * (see the "Too much work" path below); merge them back in. */
	if (status & IntReq) {
		status |= vp->deferred;
		vp->deferred = 0;
	}

	if (vortex_debug > 4)
		pr_debug("%s: interrupt, status %4.4x, latency %d ticks.\n",
			   dev->name, status, ioread8(ioaddr + Timer));
	do {
		if (vortex_debug > 5)
				pr_debug("%s: In interrupt loop, status %4.4x.\n",
					   dev->name, status);
		/* Rx descriptor(s) completed: ack first, then drain the ring. */
		if (status & UpComplete) {
			iowrite16(AckIntr | UpComplete, ioaddr + EL3_CMD);
			if (vortex_debug > 5)
				pr_debug("boomerang_interrupt->boomerang_rx\n");
			boomerang_rx(dev);
		}

		/* Tx descriptor(s) completed: reclaim transmitted skbs. */
		if (status & DownComplete) {
			unsigned int dirty_tx = vp->dirty_tx;

			iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);
			while (vp->cur_tx - dirty_tx > 0) {
				int entry = dirty_tx % TX_RING_SIZE;
#if 1	/* AKPM: the latter is faster, but cyclone-only */
				/* The chip's list pointer still sits on this
				 * descriptor, so it hasn't been DMAed yet. */
				if (ioread32(ioaddr + DownListPtr) ==
					vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
					break;			/* It still hasn't been processed. */
#else
				if ((vp->tx_ring[entry].status & DN_COMPLETE) == 0)
					break;			/* It still hasn't been processed. */
#endif

				if (vp->tx_skbuff[entry]) {
					struct sk_buff *skb = vp->tx_skbuff[entry];
#if DO_ZEROCOPY
					int i;
					/* Fragment 0 is the linear part (mapped with
					 * dma_map_single); the rest are page frags. */
					dma_unmap_single(vp->gendev,
							le32_to_cpu(vp->tx_ring[entry].frag[0].addr),
							le32_to_cpu(vp->tx_ring[entry].frag[0].length)&0xFFF,
							DMA_TO_DEVICE);

					for (i=1; i<=skb_shinfo(skb)->nr_frags; i++)
							dma_unmap_page(vp->gendev,
											 le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
											 le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
											 DMA_TO_DEVICE);
#else
					dma_unmap_single(vp->gendev,
						le32_to_cpu(vp->tx_ring[entry].addr), skb->len, DMA_TO_DEVICE);
#endif
					pkts_compl++;
					bytes_compl += skb->len;
					dev_consume_skb_irq(skb);
					vp->tx_skbuff[entry] = NULL;
				} else {
					pr_debug("boomerang_interrupt: no skb!\n");
				}
				/* dev->stats.tx_packets++;  Counted below. */
				dirty_tx++;
			}
			vp->dirty_tx = dirty_tx;
			/* Ring has at least one free slot again: restart the queue. */
			if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
				if (vortex_debug > 6)
					pr_debug("boomerang_interrupt: wake queue\n");
				netif_wake_queue (dev);
			}
		}

		/* Check for all uncommon interrupts at once. */
		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
			vortex_error(dev, status);

		if (--work_done < 0) {
			pr_warn("%s: Too much work in interrupt, status %4.4x\n",
				dev->name, status);
			/* Disable all pending interrupts. */
			do {
				/* Remember what we masked so the IntReq path
				 * above can replay it on the next interrupt. */
				vp->deferred |= status;
				iowrite16(SetStatusEnb | (~vp->deferred & vp->status_enable),
					 ioaddr + EL3_CMD);
				iowrite16(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
			} while ((status = ioread16(ioaddr + EL3_CMD)) & IntLatch);
			/* The timer will reenable interrupts. */
			mod_timer(&vp->timer, jiffies + 1*HZ);
			break;
		}
		/* Acknowledge the IRQ. */
		iowrite16(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
		if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
			iowrite32(0x8000, vp->cb_fn_base + 4);

	} while ((status = ioread16(ioaddr + EL3_STATUS)) & IntLatch);
	/* Report reclaimed tx work to the byte queue limits machinery. */
	netdev_completed_queue(dev, pkts_compl, bytes_compl);

	if (vortex_debug > 4)
		pr_debug("%s: exiting interrupt, status %4.4x.\n",
			   dev->name, status);
handler_exit:
	vp->handling_irq = 0;
	return IRQ_RETVAL(handled);
}
2499*4882a593Smuzhiyun 
2500*4882a593Smuzhiyun static irqreturn_t
vortex_boomerang_interrupt(int irq,void * dev_id)2501*4882a593Smuzhiyun vortex_boomerang_interrupt(int irq, void *dev_id)
2502*4882a593Smuzhiyun {
2503*4882a593Smuzhiyun 	struct net_device *dev = dev_id;
2504*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
2505*4882a593Smuzhiyun 	unsigned long flags;
2506*4882a593Smuzhiyun 	irqreturn_t ret;
2507*4882a593Smuzhiyun 
2508*4882a593Smuzhiyun 	spin_lock_irqsave(&vp->lock, flags);
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun 	if (vp->full_bus_master_rx)
2511*4882a593Smuzhiyun 		ret = _boomerang_interrupt(dev->irq, dev);
2512*4882a593Smuzhiyun 	else
2513*4882a593Smuzhiyun 		ret = _vortex_interrupt(dev->irq, dev);
2514*4882a593Smuzhiyun 
2515*4882a593Smuzhiyun 	spin_unlock_irqrestore(&vp->lock, flags);
2516*4882a593Smuzhiyun 
2517*4882a593Smuzhiyun 	return ret;
2518*4882a593Smuzhiyun }
2519*4882a593Smuzhiyun 
/*
 * Receive path for the non-bus-master (PIO / single-DMA) chips: pull
 * packets out of the Rx FIFO one at a time until RxStatus goes idle.
 * Always returns 0.
 */
static int vortex_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int i;
	short rx_status;

	if (vortex_debug > 5)
		pr_debug("vortex_rx(): status %4.4x, rx_status %4.4x.\n",
			   ioread16(ioaddr+EL3_STATUS), ioread16(ioaddr+RxStatus));
	/* A positive RxStatus means a frame is waiting; bit 0x4000 flags it
	 * as bad, low 13 bits carry the length. */
	while ((rx_status = ioread16(ioaddr + RxStatus)) > 0) {
		if (rx_status & 0x4000) { /* Error, update stats. */
			unsigned char rx_error = ioread8(ioaddr + RxErrors);
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb;

			skb = netdev_alloc_skb(dev, pkt_len + 5);
			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);
			if (skb != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* 'skb_put()' points to the start of sk_buff data area. */
				if (vp->bus_master &&
					! (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)) {
					/* Single-DMA copy of the frame from the FIFO
					 * to the skb; busy-wait for completion. */
					dma_addr_t dma = dma_map_single(vp->gendev, skb_put(skb, pkt_len),
									   pkt_len, DMA_FROM_DEVICE);
					iowrite32(dma, ioaddr + Wn7_MasterAddr);
					iowrite16((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
					iowrite16(StartDMAUp, ioaddr + EL3_CMD);
					while (ioread16(ioaddr + Wn7_MasterStatus) & 0x8000)
						;
					dma_unmap_single(vp->gendev, dma, pkt_len, DMA_FROM_DEVICE);
				} else {
					/* Programmed I/O: read the FIFO one dword
					 * at a time (rounded up). */
					ioread32_rep(ioaddr + RX_FIFO,
					             skb_put(skb, pkt_len),
						     (pkt_len + 3) >> 2);
				}
				iowrite16(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
				skb->protocol = eth_type_trans(skb, dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				/* Wait a limited time to go to next packet. */
				for (i = 200; i >= 0; i--)
					if ( ! (ioread16(ioaddr + EL3_STATUS) & CmdInProgress))
						break;
				continue;
			} else if (vortex_debug > 0)
				pr_notice("%s: No memory to allocate a sk_buff of size %d.\n",
					dev->name, pkt_len);
			dev->stats.rx_dropped++;
		}
		/* Error or allocation-failure path: drop the frame from the FIFO. */
		issue_and_wait(dev, RxDiscard);
	}

	return 0;
}
2587*4882a593Smuzhiyun 
/*
 * Receive path for the full bus-master chips: walk the Rx descriptor
 * ring, passing up completed frames.  Small frames (< rx_copybreak)
 * are copied into a fresh skb so the big ring buffer can be reused;
 * larger frames are handed up directly and replaced with a newly
 * allocated ring buffer.  Always returns 0.
 */
static int
boomerang_rx(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	int entry = vp->cur_rx % RX_RING_SIZE;
	void __iomem *ioaddr = vp->ioaddr;
	int rx_status;
	int rx_work_limit = RX_RING_SIZE;	/* process at most one full ring per call */

	if (vortex_debug > 5)
		pr_debug("boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));

	while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
		if (--rx_work_limit < 0)
			break;
		if (rx_status & RxDError) { /* Error, update stats. */
			unsigned char rx_error = rx_status >> 16;
			if (vortex_debug > 2)
				pr_debug(" Rx error: status %2.2x.\n", rx_error);
			dev->stats.rx_errors++;
			if (rx_error & 0x01)  dev->stats.rx_over_errors++;
			if (rx_error & 0x02)  dev->stats.rx_length_errors++;
			if (rx_error & 0x04)  dev->stats.rx_frame_errors++;
			if (rx_error & 0x08)  dev->stats.rx_crc_errors++;
			if (rx_error & 0x10)  dev->stats.rx_length_errors++;
		} else {
			/* The packet length: up to 4.5K!. */
			int pkt_len = rx_status & 0x1fff;
			struct sk_buff *skb, *newskb;
			dma_addr_t newdma;
			dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);

			if (vortex_debug > 4)
				pr_debug("Receiving packet size %d status %4.4x.\n",
					   pkt_len, rx_status);

			/* Check if the packet is long enough to just accept without
			   copying to a properly sized skbuff. */
			if (pkt_len < rx_copybreak &&
			    (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
				/* Sync the ring buffer for CPU access before
				 * copying, then give it back to the device. */
				dma_sync_single_for_cpu(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
				/* 'skb_put()' points to the start of sk_buff data area. */
				skb_put_data(skb, vp->rx_skbuff[entry]->data,
					     pkt_len);
				dma_sync_single_for_device(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
				vp->rx_copy++;
			} else {
				/* Pre-allocate the replacement skb.  If it or its
				 * mapping fails then recycle the buffer thats already
				 * in place
				 */
				newskb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
				if (!newskb) {
					dev->stats.rx_dropped++;
					goto clear_complete;
				}
				newdma = dma_map_single(vp->gendev, newskb->data,
							PKT_BUF_SZ, DMA_FROM_DEVICE);
				if (dma_mapping_error(vp->gendev, newdma)) {
					dev->stats.rx_dropped++;
					consume_skb(newskb);
					goto clear_complete;
				}

				/* Pass up the skbuff already on the Rx ring. */
				skb = vp->rx_skbuff[entry];
				vp->rx_skbuff[entry] = newskb;
				vp->rx_ring[entry].addr = cpu_to_le32(newdma);
				skb_put(skb, pkt_len);
				dma_unmap_single(vp->gendev, dma, PKT_BUF_SZ, DMA_FROM_DEVICE);
				vp->rx_nocopy++;
			}
			skb->protocol = eth_type_trans(skb, dev);
			{					/* Use hardware checksum info. */
				int csum_bits = rx_status & 0xee000000;
				if (csum_bits &&
					(csum_bits == (IPChksumValid | TCPChksumValid) ||
					 csum_bits == (IPChksumValid | UDPChksumValid))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
					vp->rx_csumhits++;
				}
			}
			netif_rx(skb);
			dev->stats.rx_packets++;
		}

clear_complete:
		/* Hand the descriptor back to the chip and unstall the
		 * upload engine in case it ran out of descriptors. */
		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
		iowrite16(UpUnstall, ioaddr + EL3_CMD);
		entry = (++vp->cur_rx) % RX_RING_SIZE;
	}
	return 0;
}
2682*4882a593Smuzhiyun 
/*
 * Quiesce the hardware: stop the queue and timer, disable rx/tx and
 * statistics, and clear the DMA list pointers.  If @final_down is set
 * and this is a PCI card, also save PCI state and arm Wake-on-LAN
 * (this is the vortex_close()/shutdown path as opposed to a reset).
 */
static void
vortex_down(struct net_device *dev, int final_down)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	netdev_reset_queue(dev);
	netif_stop_queue(dev);

	del_timer_sync(&vp->timer);

	/* Turn off statistics ASAP.  We update dev->stats below. */
	iowrite16(StatsDisable, ioaddr + EL3_CMD);

	/* Disable the receiver and transmitter. */
	iowrite16(RxDisable, ioaddr + EL3_CMD);
	iowrite16(TxDisable, ioaddr + EL3_CMD);

	/* Disable receiving 802.1q tagged frames */
	set_8021q_mode(dev, 0);

	if (dev->if_port == XCVR_10base2)
		/* Turn off thinnet power.  Green! */
		iowrite16(StopCoax, ioaddr + EL3_CMD);

	/* Mask every interrupt source. */
	iowrite16(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);

	/* Fold the final hardware counters into dev->stats while the
	 * counters are frozen. */
	update_stats(ioaddr, dev);
	if (vp->full_bus_master_rx)
		iowrite32(0, ioaddr + UpListPtr);
	if (vp->full_bus_master_tx)
		iowrite32(0, ioaddr + DownListPtr);

	if (final_down && VORTEX_PCI(vp)) {
		vp->pm_state_valid = 1;
		pci_save_state(VORTEX_PCI(vp));
		acpi_set_WOL(dev);
	}
}
2722*4882a593Smuzhiyun 
/*
 * ndo_stop handler: bring the interface down, release the IRQ, and
 * unmap/free every skb still sitting on the bus-master Rx and Tx
 * rings.  Always returns 0.
 */
static int
vortex_close(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;
	int i;

	/* Skip the hardware shutdown if the device was hot-unplugged. */
	if (netif_device_present(dev))
		vortex_down(dev, 1);

	if (vortex_debug > 1) {
		pr_debug("%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
			   dev->name, ioread16(ioaddr + EL3_STATUS), ioread8(ioaddr + TxStatus));
		pr_debug("%s: vortex close stats: rx_nocopy %d rx_copy %d"
			   " tx_queued %d Rx pre-checksummed %d.\n",
			   dev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
	}

#if DO_ZEROCOPY
	/* Advise the user if the hardware validated checksums we ignored. */
	if (vp->rx_csumhits &&
	    (vp->drv_flags & HAS_HWCKSM) == 0 &&
	    (vp->card_idx >= MAX_UNITS || hw_checksums[vp->card_idx] == -1)) {
		pr_warn("%s supports hardware checksums, and we're not using them!\n",
			dev->name);
	}
#endif

	free_irq(dev->irq, dev);

	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
		for (i = 0; i < RX_RING_SIZE; i++)
			if (vp->rx_skbuff[i]) {
				dma_unmap_single(vp->gendev, le32_to_cpu(vp->rx_ring[i].addr),
									PKT_BUF_SZ, DMA_FROM_DEVICE);
				dev_kfree_skb(vp->rx_skbuff[i]);
				vp->rx_skbuff[i] = NULL;
			}
	}
	if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
		for (i = 0; i < TX_RING_SIZE; i++) {
			if (vp->tx_skbuff[i]) {
				struct sk_buff *skb = vp->tx_skbuff[i];
#if DO_ZEROCOPY
				int k;

				/* Unmap the linear fragment plus each page frag. */
				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
						dma_unmap_single(vp->gendev,
										 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
										 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
										 DMA_TO_DEVICE);
#else
				dma_unmap_single(vp->gendev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, DMA_TO_DEVICE);
#endif
				dev_kfree_skb(skb);
				vp->tx_skbuff[i] = NULL;
			}
		}
	}

	return 0;
}
2784*4882a593Smuzhiyun 
/*
 * Debug helper: print the state of the bus-master Tx ring.  Stalls the
 * download engine while walking the descriptors so the chip doesn't
 * advance under us, and unstalls it afterwards unless it was already
 * stalled when we arrived.  No-op unless vortex_debug > 0.
 */
static void
dump_tx_ring(struct net_device *dev)
{
	if (vortex_debug > 0) {
	struct vortex_private *vp = netdev_priv(dev);
		void __iomem *ioaddr = vp->ioaddr;

		if (vp->full_bus_master_tx) {
			int i;
			int stalled = ioread32(ioaddr + PktStatus) & 0x04;	/* Possible racy. But it's only debug stuff */

			pr_err("  Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
					vp->full_bus_master_tx,
					vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
					vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
			pr_err("  Transmit list %8.8x vs. %p.\n",
				   ioread32(ioaddr + DownListPtr),
				   &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
			/* Freeze the download engine before touching the ring. */
			issue_and_wait(dev, DownStall);
			for (i = 0; i < TX_RING_SIZE; i++) {
				unsigned int length;

#if DO_ZEROCOPY
				length = le32_to_cpu(vp->tx_ring[i].frag[0].length);
#else
				length = le32_to_cpu(vp->tx_ring[i].length);
#endif
				pr_err("  %d: @%p  length %8.8x status %8.8x\n",
					   i, &vp->tx_ring[i], length,
					   le32_to_cpu(vp->tx_ring[i].status));
			}
			/* Only resume the engine if we were the ones who stalled it. */
			if (!stalled)
				iowrite16(DownUnstall, ioaddr + EL3_CMD);
		}
	}
}
2821*4882a593Smuzhiyun 
vortex_get_stats(struct net_device * dev)2822*4882a593Smuzhiyun static struct net_device_stats *vortex_get_stats(struct net_device *dev)
2823*4882a593Smuzhiyun {
2824*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
2825*4882a593Smuzhiyun 	void __iomem *ioaddr = vp->ioaddr;
2826*4882a593Smuzhiyun 	unsigned long flags;
2827*4882a593Smuzhiyun 
2828*4882a593Smuzhiyun 	if (netif_device_present(dev)) {	/* AKPM: Used to be netif_running */
2829*4882a593Smuzhiyun 		spin_lock_irqsave (&vp->lock, flags);
2830*4882a593Smuzhiyun 		update_stats(ioaddr, dev);
2831*4882a593Smuzhiyun 		spin_unlock_irqrestore (&vp->lock, flags);
2832*4882a593Smuzhiyun 	}
2833*4882a593Smuzhiyun 	return &dev->stats;
2834*4882a593Smuzhiyun }
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun /*  Update statistics.
2837*4882a593Smuzhiyun 	Unlike with the EL3 we need not worry about interrupts changing
2838*4882a593Smuzhiyun 	the window setting from underneath us, but we must still guard
2839*4882a593Smuzhiyun 	against a race condition with a StatsUpdate interrupt updating the
2840*4882a593Smuzhiyun 	table.  This is done by checking that the ASM (!) code generated uses
2841*4882a593Smuzhiyun 	atomic updates with '+='.
2842*4882a593Smuzhiyun 	*/
static void update_stats(void __iomem *ioaddr, struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);

	/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
	/* Switch to the stats window, and read everything.  The hardware
	 * counters are clear-on-read, so every register must be read even
	 * when its value is discarded, and each is accumulated with '+='. */
	dev->stats.tx_carrier_errors		+= window_read8(vp, 6, 0);
	dev->stats.tx_heartbeat_errors		+= window_read8(vp, 6, 1);
	dev->stats.tx_window_errors		+= window_read8(vp, 6, 4);
	dev->stats.rx_fifo_errors		+= window_read8(vp, 6, 5);
	dev->stats.tx_packets			+= window_read8(vp, 6, 6);
	/* Register 9 bits 4-5 are the tx_packets counter's high bits. */
	dev->stats.tx_packets			+= (window_read8(vp, 6, 9) &
						    0x30) << 4;
	/* Rx packets	*/			window_read8(vp, 6, 7);   /* Must read to clear */
	/* Don't bother with register 9, an extension of registers 6&7.
	   If we do use the 6&7 values the atomic update assumption above
	   is invalid. */
	dev->stats.rx_bytes 			+= window_read16(vp, 6, 10);
	dev->stats.tx_bytes 			+= window_read16(vp, 6, 12);
	/* Extra stats for get_ethtool_stats() */
	vp->xstats.tx_multiple_collisions	+= window_read8(vp, 6, 2);
	vp->xstats.tx_single_collisions         += window_read8(vp, 6, 3);
	vp->xstats.tx_deferred			+= window_read8(vp, 6, 8);
	vp->xstats.rx_bad_ssd			+= window_read8(vp, 4, 12);

	dev->stats.collisions = vp->xstats.tx_multiple_collisions
		+ vp->xstats.tx_single_collisions
		+ vp->xstats.tx_max_collisions;

	{
		/* Window 4 reg 13 packs the upper nibbles of the byte
		 * counters: low nibble extends rx_bytes, high nibble
		 * extends tx_bytes. */
		u8 up = window_read8(vp, 4, 13);
		dev->stats.rx_bytes += (up & 0x0f) << 16;
		dev->stats.tx_bytes += (up & 0xf0) << 12;
	}
}
2878*4882a593Smuzhiyun 
vortex_nway_reset(struct net_device * dev)2879*4882a593Smuzhiyun static int vortex_nway_reset(struct net_device *dev)
2880*4882a593Smuzhiyun {
2881*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
2882*4882a593Smuzhiyun 
2883*4882a593Smuzhiyun 	return mii_nway_restart(&vp->mii);
2884*4882a593Smuzhiyun }
2885*4882a593Smuzhiyun 
vortex_get_link_ksettings(struct net_device * dev,struct ethtool_link_ksettings * cmd)2886*4882a593Smuzhiyun static int vortex_get_link_ksettings(struct net_device *dev,
2887*4882a593Smuzhiyun 				     struct ethtool_link_ksettings *cmd)
2888*4882a593Smuzhiyun {
2889*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
2890*4882a593Smuzhiyun 
2891*4882a593Smuzhiyun 	mii_ethtool_get_link_ksettings(&vp->mii, cmd);
2892*4882a593Smuzhiyun 
2893*4882a593Smuzhiyun 	return 0;
2894*4882a593Smuzhiyun }
2895*4882a593Smuzhiyun 
vortex_set_link_ksettings(struct net_device * dev,const struct ethtool_link_ksettings * cmd)2896*4882a593Smuzhiyun static int vortex_set_link_ksettings(struct net_device *dev,
2897*4882a593Smuzhiyun 				     const struct ethtool_link_ksettings *cmd)
2898*4882a593Smuzhiyun {
2899*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
2900*4882a593Smuzhiyun 
2901*4882a593Smuzhiyun 	return mii_ethtool_set_link_ksettings(&vp->mii, cmd);
2902*4882a593Smuzhiyun }
2903*4882a593Smuzhiyun 
/* ethtool get_msglevel hook.  The driver has no per-device msg_enable
 * field; it reuses the module-wide vortex_debug verbosity knob. */
static u32 vortex_get_msglevel(struct net_device *dev)
{
	return vortex_debug;
}
2908*4882a593Smuzhiyun 
/* ethtool set_msglevel hook.  Note this sets the module-wide vortex_debug
 * level, so it affects every card driven by this module, not just @dev. */
static void vortex_set_msglevel(struct net_device *dev, u32 dbg)
{
	vortex_debug = dbg;
}
2913*4882a593Smuzhiyun 
vortex_get_sset_count(struct net_device * dev,int sset)2914*4882a593Smuzhiyun static int vortex_get_sset_count(struct net_device *dev, int sset)
2915*4882a593Smuzhiyun {
2916*4882a593Smuzhiyun 	switch (sset) {
2917*4882a593Smuzhiyun 	case ETH_SS_STATS:
2918*4882a593Smuzhiyun 		return VORTEX_NUM_STATS;
2919*4882a593Smuzhiyun 	default:
2920*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2921*4882a593Smuzhiyun 	}
2922*4882a593Smuzhiyun }
2923*4882a593Smuzhiyun 
/* ethtool get_ethtool_stats hook: refresh the hardware counters under the
 * device lock, then copy the extra stats out in VORTEX_NUM_STATS order
 * (must match ethtool_stats_keys). */
static void vortex_get_ethtool_stats(struct net_device *dev,
	struct ethtool_stats *stats, u64 *data)
{
	struct vortex_private *priv = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	update_stats(priv->ioaddr, dev);
	spin_unlock_irqrestore(&priv->lock, flags);

	data[0] = priv->xstats.tx_deferred;
	data[1] = priv->xstats.tx_max_collisions;
	data[2] = priv->xstats.tx_multiple_collisions;
	data[3] = priv->xstats.tx_single_collisions;
	data[4] = priv->xstats.rx_bad_ssd;
}
2941*4882a593Smuzhiyun 
2942*4882a593Smuzhiyun 
/* ethtool get_strings hook: export the statistics key names.  Any other
 * string set is a caller bug (get_sset_count already rejects them). */
static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
	else
		WARN_ON(1);
}
2954*4882a593Smuzhiyun 
vortex_get_drvinfo(struct net_device * dev,struct ethtool_drvinfo * info)2955*4882a593Smuzhiyun static void vortex_get_drvinfo(struct net_device *dev,
2956*4882a593Smuzhiyun 					struct ethtool_drvinfo *info)
2957*4882a593Smuzhiyun {
2958*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
2959*4882a593Smuzhiyun 
2960*4882a593Smuzhiyun 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2961*4882a593Smuzhiyun 	if (VORTEX_PCI(vp)) {
2962*4882a593Smuzhiyun 		strlcpy(info->bus_info, pci_name(VORTEX_PCI(vp)),
2963*4882a593Smuzhiyun 			sizeof(info->bus_info));
2964*4882a593Smuzhiyun 	} else {
2965*4882a593Smuzhiyun 		if (VORTEX_EISA(vp))
2966*4882a593Smuzhiyun 			strlcpy(info->bus_info, dev_name(vp->gendev),
2967*4882a593Smuzhiyun 				sizeof(info->bus_info));
2968*4882a593Smuzhiyun 		else
2969*4882a593Smuzhiyun 			snprintf(info->bus_info, sizeof(info->bus_info),
2970*4882a593Smuzhiyun 				"EISA 0x%lx %d", dev->base_addr, dev->irq);
2971*4882a593Smuzhiyun 	}
2972*4882a593Smuzhiyun }
2973*4882a593Smuzhiyun 
vortex_get_wol(struct net_device * dev,struct ethtool_wolinfo * wol)2974*4882a593Smuzhiyun static void vortex_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2975*4882a593Smuzhiyun {
2976*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
2977*4882a593Smuzhiyun 
2978*4882a593Smuzhiyun 	if (!VORTEX_PCI(vp))
2979*4882a593Smuzhiyun 		return;
2980*4882a593Smuzhiyun 
2981*4882a593Smuzhiyun 	wol->supported = WAKE_MAGIC;
2982*4882a593Smuzhiyun 
2983*4882a593Smuzhiyun 	wol->wolopts = 0;
2984*4882a593Smuzhiyun 	if (vp->enable_wol)
2985*4882a593Smuzhiyun 		wol->wolopts |= WAKE_MAGIC;
2986*4882a593Smuzhiyun }
2987*4882a593Smuzhiyun 
vortex_set_wol(struct net_device * dev,struct ethtool_wolinfo * wol)2988*4882a593Smuzhiyun static int vortex_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2989*4882a593Smuzhiyun {
2990*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
2991*4882a593Smuzhiyun 
2992*4882a593Smuzhiyun 	if (!VORTEX_PCI(vp))
2993*4882a593Smuzhiyun 		return -EOPNOTSUPP;
2994*4882a593Smuzhiyun 
2995*4882a593Smuzhiyun 	if (wol->wolopts & ~WAKE_MAGIC)
2996*4882a593Smuzhiyun 		return -EINVAL;
2997*4882a593Smuzhiyun 
2998*4882a593Smuzhiyun 	if (wol->wolopts & WAKE_MAGIC)
2999*4882a593Smuzhiyun 		vp->enable_wol = 1;
3000*4882a593Smuzhiyun 	else
3001*4882a593Smuzhiyun 		vp->enable_wol = 0;
3002*4882a593Smuzhiyun 	acpi_set_WOL(dev);
3003*4882a593Smuzhiyun 
3004*4882a593Smuzhiyun 	return 0;
3005*4882a593Smuzhiyun }
3006*4882a593Smuzhiyun 
3007*4882a593Smuzhiyun static const struct ethtool_ops vortex_ethtool_ops = {
3008*4882a593Smuzhiyun 	.get_drvinfo		= vortex_get_drvinfo,
3009*4882a593Smuzhiyun 	.get_strings            = vortex_get_strings,
3010*4882a593Smuzhiyun 	.get_msglevel           = vortex_get_msglevel,
3011*4882a593Smuzhiyun 	.set_msglevel           = vortex_set_msglevel,
3012*4882a593Smuzhiyun 	.get_ethtool_stats      = vortex_get_ethtool_stats,
3013*4882a593Smuzhiyun 	.get_sset_count		= vortex_get_sset_count,
3014*4882a593Smuzhiyun 	.get_link               = ethtool_op_get_link,
3015*4882a593Smuzhiyun 	.nway_reset             = vortex_nway_reset,
3016*4882a593Smuzhiyun 	.get_wol                = vortex_get_wol,
3017*4882a593Smuzhiyun 	.set_wol                = vortex_set_wol,
3018*4882a593Smuzhiyun 	.get_ts_info		= ethtool_op_get_ts_info,
3019*4882a593Smuzhiyun 	.get_link_ksettings     = vortex_get_link_ksettings,
3020*4882a593Smuzhiyun 	.set_link_ksettings     = vortex_set_link_ksettings,
3021*4882a593Smuzhiyun };
3022*4882a593Smuzhiyun 
3023*4882a593Smuzhiyun #ifdef CONFIG_PCI
3024*4882a593Smuzhiyun /*
3025*4882a593Smuzhiyun  *	Must power the device up to do MDIO operations
3026*4882a593Smuzhiyun  */
vortex_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)3027*4882a593Smuzhiyun static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3028*4882a593Smuzhiyun {
3029*4882a593Smuzhiyun 	int err;
3030*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
3031*4882a593Smuzhiyun 	pci_power_t state = 0;
3032*4882a593Smuzhiyun 
3033*4882a593Smuzhiyun 	if(VORTEX_PCI(vp))
3034*4882a593Smuzhiyun 		state = VORTEX_PCI(vp)->current_state;
3035*4882a593Smuzhiyun 
3036*4882a593Smuzhiyun 	/* The kernel core really should have pci_get_power_state() */
3037*4882a593Smuzhiyun 
3038*4882a593Smuzhiyun 	if(state != 0)
3039*4882a593Smuzhiyun 		pci_set_power_state(VORTEX_PCI(vp), PCI_D0);
3040*4882a593Smuzhiyun 	err = generic_mii_ioctl(&vp->mii, if_mii(rq), cmd, NULL);
3041*4882a593Smuzhiyun 	if(state != 0)
3042*4882a593Smuzhiyun 		pci_set_power_state(VORTEX_PCI(vp), state);
3043*4882a593Smuzhiyun 
3044*4882a593Smuzhiyun 	return err;
3045*4882a593Smuzhiyun }
3046*4882a593Smuzhiyun #endif
3047*4882a593Smuzhiyun 
3048*4882a593Smuzhiyun 
3049*4882a593Smuzhiyun /* Pre-Cyclone chips have no documented multicast filter, so the only
3050*4882a593Smuzhiyun    multicast setting is to receive all multicast frames.  At least
3051*4882a593Smuzhiyun    the chip has a very clean way to set the mode, unlike many others. */
set_rx_mode(struct net_device * dev)3052*4882a593Smuzhiyun static void set_rx_mode(struct net_device *dev)
3053*4882a593Smuzhiyun {
3054*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
3055*4882a593Smuzhiyun 	void __iomem *ioaddr = vp->ioaddr;
3056*4882a593Smuzhiyun 	int new_mode;
3057*4882a593Smuzhiyun 
3058*4882a593Smuzhiyun 	if (dev->flags & IFF_PROMISC) {
3059*4882a593Smuzhiyun 		if (vortex_debug > 3)
3060*4882a593Smuzhiyun 			pr_notice("%s: Setting promiscuous mode.\n", dev->name);
3061*4882a593Smuzhiyun 		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
3062*4882a593Smuzhiyun 	} else	if (!netdev_mc_empty(dev) || dev->flags & IFF_ALLMULTI) {
3063*4882a593Smuzhiyun 		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
3064*4882a593Smuzhiyun 	} else
3065*4882a593Smuzhiyun 		new_mode = SetRxFilter | RxStation | RxBroadcast;
3066*4882a593Smuzhiyun 
3067*4882a593Smuzhiyun 	iowrite16(new_mode, ioaddr + EL3_CMD);
3068*4882a593Smuzhiyun }
3069*4882a593Smuzhiyun 
3070*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_VLAN_8021Q)
3071*4882a593Smuzhiyun /* Setup the card so that it can receive frames with an 802.1q VLAN tag.
3072*4882a593Smuzhiyun    Note that this must be done after each RxReset due to some backwards
3073*4882a593Smuzhiyun    compatibility logic in the Cyclone and Tornado ASICs */
3074*4882a593Smuzhiyun 
3075*4882a593Smuzhiyun /* The Ethernet Type used for 802.1q tagged frames */
3076*4882a593Smuzhiyun #define VLAN_ETHER_TYPE 0x8100
3077*4882a593Smuzhiyun 
set_8021q_mode(struct net_device * dev,int enable)3078*4882a593Smuzhiyun static void set_8021q_mode(struct net_device *dev, int enable)
3079*4882a593Smuzhiyun {
3080*4882a593Smuzhiyun 	struct vortex_private *vp = netdev_priv(dev);
3081*4882a593Smuzhiyun 	int mac_ctrl;
3082*4882a593Smuzhiyun 
3083*4882a593Smuzhiyun 	if ((vp->drv_flags&IS_CYCLONE) || (vp->drv_flags&IS_TORNADO)) {
3084*4882a593Smuzhiyun 		/* cyclone and tornado chipsets can recognize 802.1q
3085*4882a593Smuzhiyun 		 * tagged frames and treat them correctly */
3086*4882a593Smuzhiyun 
3087*4882a593Smuzhiyun 		int max_pkt_size = dev->mtu+14;	/* MTU+Ethernet header */
3088*4882a593Smuzhiyun 		if (enable)
3089*4882a593Smuzhiyun 			max_pkt_size += 4;	/* 802.1Q VLAN tag */
3090*4882a593Smuzhiyun 
3091*4882a593Smuzhiyun 		window_write16(vp, max_pkt_size, 3, Wn3_MaxPktSize);
3092*4882a593Smuzhiyun 
3093*4882a593Smuzhiyun 		/* set VlanEtherType to let the hardware checksumming
3094*4882a593Smuzhiyun 		   treat tagged frames correctly */
3095*4882a593Smuzhiyun 		window_write16(vp, VLAN_ETHER_TYPE, 7, Wn7_VlanEtherType);
3096*4882a593Smuzhiyun 	} else {
3097*4882a593Smuzhiyun 		/* on older cards we have to enable large frames */
3098*4882a593Smuzhiyun 
3099*4882a593Smuzhiyun 		vp->large_frames = dev->mtu > 1500 || enable;
3100*4882a593Smuzhiyun 
3101*4882a593Smuzhiyun 		mac_ctrl = window_read16(vp, 3, Wn3_MAC_Ctrl);
3102*4882a593Smuzhiyun 		if (vp->large_frames)
3103*4882a593Smuzhiyun 			mac_ctrl |= 0x40;
3104*4882a593Smuzhiyun 		else
3105*4882a593Smuzhiyun 			mac_ctrl &= ~0x40;
3106*4882a593Smuzhiyun 		window_write16(vp, mac_ctrl, 3, Wn3_MAC_Ctrl);
3107*4882a593Smuzhiyun 	}
3108*4882a593Smuzhiyun }
3109*4882a593Smuzhiyun #else
3110*4882a593Smuzhiyun 
/* No-op stub used when CONFIG_VLAN_8021Q is disabled: there is nothing
 * to configure, but callers still compile unchanged. */
static void set_8021q_mode(struct net_device *dev, int enable)
{
}
3114*4882a593Smuzhiyun 
3115*4882a593Smuzhiyun 
3116*4882a593Smuzhiyun #endif
3117*4882a593Smuzhiyun 
3118*4882a593Smuzhiyun /* MII transceiver control section.
3119*4882a593Smuzhiyun    Read and write the MII registers using software-generated serial
3120*4882a593Smuzhiyun    MDIO protocol.  See the MII specifications or DP83840A data sheet
3121*4882a593Smuzhiyun    for details. */
3122*4882a593Smuzhiyun 
3123*4882a593Smuzhiyun /* The maximum data clock rate is 2.5 Mhz.  The minimum timing is usually
3124*4882a593Smuzhiyun    met by back-to-back PCI I/O cycles, but we insert a delay to avoid
3125*4882a593Smuzhiyun    "overclocking" issues. */
/* Dummy read of a window-4 register; the PCI read cycle itself provides
 * the delay that keeps the bit-banged MDIO clock within spec. */
static void mdio_delay(struct vortex_private *vp)
{
	window_read32(vp, 4, Wn4_PhysicalMgmt);
}
3130*4882a593Smuzhiyun 
/* Bit assignments in the Wn4_PhysicalMgmt register used to bit-bang the
 * MDIO serial protocol (see mdio_read()/mdio_write() below). */
#define MDIO_SHIFT_CLK	0x01	/* MDC clock line */
#define MDIO_DIR_WRITE	0x04	/* drive the MDIO data pin as an output */
#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)	/* output a 0 bit */
#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)	/* output a 1 bit */
#define MDIO_DATA_READ	0x02	/* MDIO data pin state when sampling */
#define MDIO_ENB_IN		0x00	/* release the pin (input/read mode) */
3138*4882a593Smuzhiyun /* Generate the preamble required for initial synchronization and
3139*4882a593Smuzhiyun    a few older transceivers. */
mdio_sync(struct vortex_private * vp,int bits)3140*4882a593Smuzhiyun static void mdio_sync(struct vortex_private *vp, int bits)
3141*4882a593Smuzhiyun {
3142*4882a593Smuzhiyun 	/* Establish sync by sending at least 32 logic ones. */
3143*4882a593Smuzhiyun 	while (-- bits >= 0) {
3144*4882a593Smuzhiyun 		window_write16(vp, MDIO_DATA_WRITE1, 4, Wn4_PhysicalMgmt);
3145*4882a593Smuzhiyun 		mdio_delay(vp);
3146*4882a593Smuzhiyun 		window_write16(vp, MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK,
3147*4882a593Smuzhiyun 			       4, Wn4_PhysicalMgmt);
3148*4882a593Smuzhiyun 		mdio_delay(vp);
3149*4882a593Smuzhiyun 	}
3150*4882a593Smuzhiyun }
3151*4882a593Smuzhiyun 
/* Read one 16-bit MII register by bit-banging the MDIO protocol through
 * Wn4_PhysicalMgmt.  Returns the register value, or 0xffff when no PHY
 * responded (detected via the turnaround bit, see below).  Serialized by
 * vp->mii_lock; safe in softirq context (spin_lock_bh). */
static int mdio_read(struct net_device *dev, int phy_id, int location)
{
	int i;
	struct vortex_private *vp = netdev_priv(dev);
	/* 14-bit read frame: start(01) + read opcode(10) + 5-bit PHY
	 * address + 5-bit register address (0xf6 = 11110110b header). */
	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
	unsigned int retval = 0;

	spin_lock_bh(&vp->mii_lock);

	if (mii_preamble_required)
		mdio_sync(vp, 32);

	/* Shift the read command bits out. */
	for (i = 14; i >= 0; i--) {
		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		/* Raise MDC with the data still presented to clock it in. */
		window_write16(vp, dataval | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
	/* Read the two transition, 16 data, and wire-idle bits. */
	for (i = 19; i > 0; i--) {
		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		/* Sample MDIO while MDC is low, then pulse the clock. */
		retval = (retval << 1) |
			((window_read16(vp, 4, Wn4_PhysicalMgmt) &
			  MDIO_DATA_READ) ? 1 : 0);
		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}

	spin_unlock_bh(&vp->mii_lock);

	/* Bit 17 is the turnaround bit: a responding PHY drives it low.
	 * If it is still high, no PHY answered -> report all-ones. */
	return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
}
3189*4882a593Smuzhiyun 
/* Write @value to one 16-bit MII register by bit-banging the MDIO
 * protocol through Wn4_PhysicalMgmt.  Serialized by vp->mii_lock. */
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
{
	struct vortex_private *vp = netdev_priv(dev);
	/* 32-bit write frame: 0x50020000 supplies start(01), write
	 * opcode(01) and the turnaround pattern(10); PHY and register
	 * addresses and the data are OR'd into their fields. */
	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
	int i;

	spin_lock_bh(&vp->mii_lock);

	if (mii_preamble_required)
		mdio_sync(vp, 32);

	/* Shift the command bits out. */
	for (i = 31; i >= 0; i--) {
		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
		window_write16(vp, dataval, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		/* Raise MDC with the data still presented to clock it in. */
		window_write16(vp, dataval | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}
	/* Leave the interface idle. */
	for (i = 1; i >= 0; i--) {
		window_write16(vp, MDIO_ENB_IN, 4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
		window_write16(vp, MDIO_ENB_IN | MDIO_SHIFT_CLK,
			       4, Wn4_PhysicalMgmt);
		mdio_delay(vp);
	}

	spin_unlock_bh(&vp->mii_lock);
}
3221*4882a593Smuzhiyun 
3222*4882a593Smuzhiyun /* ACPI: Advanced Configuration and Power Interface. */
3223*4882a593Smuzhiyun /* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
/* ACPI: Advanced Configuration and Power Interface. */
/* Set Wake-On-LAN mode and put the board into D3 (power-down) state.
 * Called with vp->enable_wol already reflecting the desired setting;
 * if arming WOL fails, enable_wol is cleared again. */
static void acpi_set_WOL(struct net_device *dev)
{
	struct vortex_private *vp = netdev_priv(dev);
	void __iomem *ioaddr = vp->ioaddr;

	device_set_wakeup_enable(vp->gendev, vp->enable_wol);

	if (vp->enable_wol) {
		/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
		window_write16(vp, 2, 7, 0x0c);
		/* The RxFilter must accept the WOL frames. */
		iowrite16(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
		iowrite16(RxEnable, ioaddr + EL3_CMD);

		if (pci_enable_wake(VORTEX_PCI(vp), PCI_D3hot, 1)) {
			/* PCI core cannot arm wake for this device; back out. */
			pr_info("%s: WOL not supported.\n", pci_name(VORTEX_PCI(vp)));

			vp->enable_wol = 0;
			return;
		}

		/* Already in a deeper state than D3hot -> nothing to do. */
		if (VORTEX_PCI(vp)->current_state < PCI_D3hot)
			return;

		/* Change the power state to D3; RxEnable doesn't take effect. */
		pci_set_power_state(VORTEX_PCI(vp), PCI_D3hot);
	}
}
3252*4882a593Smuzhiyun 
3253*4882a593Smuzhiyun 
/* PCI .remove callback: tear the device down in the reverse order of
 * probe -- unmap CardBus function window, unregister the netdev, wake and
 * restore the PCI device, reset the chip, then release MMIO, DMA rings
 * and PCI resources.  Ordering matters: the netdev must be unregistered
 * before its resources are freed. */
static void vortex_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct vortex_private *vp;

	if (!dev) {
		/* Compaq/EISA devices are torn down by vortex_eisa_cleanup(),
		 * never through the PCI remove path. */
		pr_err("vortex_remove_one called for Compaq device!\n");
		BUG();
	}

	vp = netdev_priv(dev);

	if (vp->cb_fn_base)
		pci_iounmap(pdev, vp->cb_fn_base);

	unregister_netdev(dev);

	pci_set_power_state(pdev, PCI_D0);	/* Go active */
	if (vp->pm_state_valid)
		pci_restore_state(pdev);
	pci_disable_device(pdev);

	/* Should really use issue_and_wait() here */
	iowrite16(TotalReset | ((vp->drv_flags & EEPROM_RESET) ? 0x04 : 0x14),
	     vp->ioaddr + EL3_CMD);

	pci_iounmap(pdev, vp->ioaddr);

	/* RX and TX rings were allocated as one contiguous DMA block. */
	dma_free_coherent(&pdev->dev,
			sizeof(struct boom_rx_desc) * RX_RING_SIZE +
			sizeof(struct boom_tx_desc) * TX_RING_SIZE,
			vp->rx_ring, vp->rx_ring_dma);

	pci_release_regions(pdev);

	free_netdev(dev);
}
3291*4882a593Smuzhiyun 
3292*4882a593Smuzhiyun 
/* PCI driver glue: probe/remove entry points, the supported device ID
 * table, and the power-management callbacks. */
static struct pci_driver vortex_driver = {
	.name		= "3c59x",
	.probe		= vortex_init_one,
	.remove		= vortex_remove_one,
	.id_table	= vortex_pci_tbl,
	.driver.pm	= VORTEX_PM_OPS,
};
3300*4882a593Smuzhiyun 
3301*4882a593Smuzhiyun 
/* Set by vortex_init() to record which bus registrations succeeded, so
 * vortex_cleanup() only tears down what was actually registered. */
static int vortex_have_pci;
static int vortex_have_eisa;
3304*4882a593Smuzhiyun 
3305*4882a593Smuzhiyun 
vortex_init(void)3306*4882a593Smuzhiyun static int __init vortex_init(void)
3307*4882a593Smuzhiyun {
3308*4882a593Smuzhiyun 	int pci_rc, eisa_rc;
3309*4882a593Smuzhiyun 
3310*4882a593Smuzhiyun 	pci_rc = pci_register_driver(&vortex_driver);
3311*4882a593Smuzhiyun 	eisa_rc = vortex_eisa_init();
3312*4882a593Smuzhiyun 
3313*4882a593Smuzhiyun 	if (pci_rc == 0)
3314*4882a593Smuzhiyun 		vortex_have_pci = 1;
3315*4882a593Smuzhiyun 	if (eisa_rc > 0)
3316*4882a593Smuzhiyun 		vortex_have_eisa = 1;
3317*4882a593Smuzhiyun 
3318*4882a593Smuzhiyun 	return (vortex_have_pci + vortex_have_eisa) ? 0 : -ENODEV;
3319*4882a593Smuzhiyun }
3320*4882a593Smuzhiyun 
3321*4882a593Smuzhiyun 
/* Undo the EISA-side setup: unregister the EISA driver (if built in) and
 * tear down the directly-probed Compaq on-board device, resetting the
 * chip before releasing its I/O region and freeing the netdev. */
static void __exit vortex_eisa_cleanup(void)
{
	void __iomem *ioaddr;

#ifdef CONFIG_EISA
	/* Take care of the EISA devices */
	eisa_driver_unregister(&vortex_eisa_driver);
#endif

	if (compaq_net_device) {
		/* Re-map the I/O window so the chip can be reset below. */
		ioaddr = ioport_map(compaq_net_device->base_addr,
		                    VORTEX_TOTAL_SIZE);

		unregister_netdev(compaq_net_device);
		iowrite16(TotalReset, ioaddr + EL3_CMD);
		release_region(compaq_net_device->base_addr,
		               VORTEX_TOTAL_SIZE);

		free_netdev(compaq_net_device);
	}
}
3343*4882a593Smuzhiyun 
3344*4882a593Smuzhiyun 
/* Module exit point: tear down only the buses vortex_init() actually
 * brought up (tracked in vortex_have_pci / vortex_have_eisa). */
static void __exit vortex_cleanup(void)
{
	if (vortex_have_pci)
		pci_unregister_driver(&vortex_driver);
	if (vortex_have_eisa)
		vortex_eisa_cleanup();
}
3352*4882a593Smuzhiyun 
3353*4882a593Smuzhiyun 
/* Module load/unload hooks. */
module_init(vortex_init);
module_exit(vortex_cleanup);
3356