xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/amd/lance.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* lance.c: An AMD LANCE/PCnet ethernet driver for Linux. */
/*
	Written/copyright 1993-1998 by Donald Becker.

	Copyright 1993 United States Government as represented by the
	Director, National Security Agency.
	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	This driver is for the Allied Telesis AT1500 and HP J2405A, and should work
	with most other LANCE-based bus-master (NE2100/NE2500) ethercards.

	The author may be reached as becker@scyld.com, or C/O
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Andrey V. Savochkin:
	- alignment problem with 1.3.* kernel and some minor changes.
	Thomas Bogendoerfer (tsbogend@bigbug.franken.de):
	- added support for Linux/Alpha, but removed most of it, because
	  it worked only for the PCI chip.
	- added hook for the 32bit lance driver
	- added PCnetPCI II (79C970A) to chip table
	Paul Gortmaker (gpg109@rsphy1.anu.edu.au):
	- hopefully fix above so Linux/Alpha can use ISA cards too.
	8/20/96 Fixed 7990 autoIRQ failure and reversed unneeded alignment -djb
	v1.12 10/27/97 Module support -djb
	v1.14  2/3/98 Module support modified, made PCI support optional -djb
	v1.15 5/27/99 Fixed bug in the cleanup_module(). dev->priv was freed
		      before unregister_netdev(), which caused a NULL pointer
		      dereference later in the chain (in rtnetlink_fill_ifinfo())
		      -- Mika Kuoppala <miku@iki.fi>

	Forward ported v1.14 to 2.1.129, merged the PCI and misc changes from
	the 2.1 version of the old driver - Alan Cox

	Got rid of check_region, check kmalloc return in lance_probe1
	Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 11/01/2001

	Reworked detection, added support for Racal InterLan EtherBlaster cards
	Vesselin Kostadinov <vesok at yahoo dot com> - 22/4/2004
*/
static const char version[] = "lance.c:v1.16 2006/11/09 dplatt@3do.com, becker@cesdis.gsfc.nasa.gov\n";

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/bitops.h>

#include <asm/io.h>
#include <asm/dma.h>

static unsigned int lance_portlist[] __initdata = { 0x300, 0x320, 0x340, 0x360, 0};
static int lance_probe1(struct net_device *dev, int ioaddr, int irq, int options);
static int __init do_lance_probe(struct net_device *dev);


static struct card {
	char id_offset14;
	char id_offset15;
} cards[] = {
	{	//"normal"
		.id_offset14 = 0x57,
		.id_offset15 = 0x57,
	},
	{	//NI6510EB
		.id_offset14 = 0x52,
		.id_offset15 = 0x44,
	},
	{	//Racal InterLan EtherBlaster
		.id_offset14 = 0x52,
		.id_offset15 = 0x49,
	},
};
#define NUM_CARDS 3

#ifdef LANCE_DEBUG
static int lance_debug = LANCE_DEBUG;
#else
static int lance_debug = 1;
#endif

/*
				Theory of Operation

I. Board Compatibility

This device driver is designed for the AMD 79C960, the "PCnet-ISA
single-chip ethernet controller for ISA".  This chip is used in a wide
variety of boards from vendors such as Allied Telesis, HP, Kingston,
and Boca.  This driver is also intended to work with older AMD 7990
designs, such as the NE1500 and NE2100, and the newer 79C961.  For convenience,
I use the name LANCE to refer to all of the AMD chips, even though it properly
refers only to the original 7990.

II. Board-specific settings

The driver is designed to work with boards that use the faster
bus-master mode, rather than in shared memory mode.  (Only older designs
have the on-board buffer memory needed to support the slower shared memory mode.)

Most ISA boards have jumpered settings for the I/O base, IRQ line, and DMA
channel.  This driver probes the likely base addresses:
{0x300, 0x320, 0x340, 0x360}.
After the board is found it generates a DMA-timeout interrupt and uses
autoIRQ to find the IRQ line.  The DMA channel can be set with the low bits
of the otherwise-unused dev->mem_start value (aka PARAM1).  If unset it is
probed for by enabling each free DMA channel in turn and checking if
initialization succeeds.

The HP-J2405A board is an exception: with this board it is easy to read the
EEPROM-set values for the base, IRQ, and DMA.  (Of course you must already
_know_ the base address -- that field is for writing the EEPROM.)

III. Driver operation

IIIa. Ring buffers
The LANCE uses ring buffers of Tx and Rx descriptors.  Each entry describes
the base and length of the data buffer, along with status bits.  The size
of these rings is set by LANCE_LOG_{RX,TX}_BUFFERS, which is log_2() of
the ring entry count (rather than being the count directly) for
implementation ease.  The default values are 4 (Tx) and 4 (Rx), which leads
to ring sizes of 16 (Tx) and 16 (Rx).  Increasing the number of ring entries
needlessly uses extra space and reduces the chance that an upper layer will
be able to reorder queued Tx packets based on priority.  Decreasing the number
of entries makes it more difficult to achieve back-to-back packet transmission
and increases the chance that the Rx ring will overflow.  (Consider the worst
case of receiving back-to-back minimum-sized packets.)

The LANCE has the capability to "chain" both Rx and Tx buffers, but this driver
statically allocates full-sized (slightly oversized -- PKT_BUF_SZ) buffers to
avoid the administrative overhead.  For the Rx side this avoids dynamically
allocating full-sized buffers "just in case", at the expense of a
memory-to-memory data copy for each packet received.  For most systems this
is a good tradeoff: the Rx buffer will always be in low memory, the copy
is inexpensive, and it primes the cache for later packet processing.  For Tx
the buffers are only used when needed as low-memory bounce buffers.

IIIB. 16M memory limitations.
For the ISA bus-master mode all structures used directly by the LANCE
(the initialization block, the Rx and Tx rings, and the data buffers) must be
accessible from the ISA bus, i.e. in the lower 16M of real memory.
This is a problem for current Linux kernels on >16M machines. The network
devices are initialized after memory initialization, and the kernel doles out
memory from the top of memory downward.  The current solution is to have a
special network initialization routine that's called before memory
initialization; this will eventually be generalized for all network devices.
As mentioned before, low-memory "bounce buffers" are used when needed.

IIIC. Synchronization
The driver runs as two independent, single-threaded flows of control.  One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag.  The other thread is the interrupt handler, which is single
threaded by the hardware and other software.

The send packet thread has partial control over the Tx ring and the
'dev->tbusy' flag.  It sets the tbusy flag whenever it's queuing a Tx packet.
If the next queue slot is empty, it clears the tbusy flag when finished;
otherwise it sets the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring.  (The Tx-done interrupt can't be selectively turned off, so
we can't avoid the interrupt overhead by having the Tx routine reap the Tx
stats.)  After reaping the stats, it marks the queue entry as empty by setting
the 'base' to zero.  Iff the 'lp->tx_full' flag is set, it clears both the
tx_full and tbusy flags.

*/

/* Set the number of Tx and Rx buffers, using Log_2(# buffers).
   Reasonable default values are 16 Tx buffers and 16 Rx buffers.
   That translates to 4 and 4 (16 == 2^4).
   This is a compile-time option for efficiency.
   */
#ifndef LANCE_LOG_TX_BUFFERS
#define LANCE_LOG_TX_BUFFERS 4
#define LANCE_LOG_RX_BUFFERS 4
#endif

#define TX_RING_SIZE			(1 << (LANCE_LOG_TX_BUFFERS))
#define TX_RING_MOD_MASK		(TX_RING_SIZE - 1)
#define TX_RING_LEN_BITS		((LANCE_LOG_TX_BUFFERS) << 29)

#define RX_RING_SIZE			(1 << (LANCE_LOG_RX_BUFFERS))
#define RX_RING_MOD_MASK		(RX_RING_SIZE - 1)
#define RX_RING_LEN_BITS		((LANCE_LOG_RX_BUFFERS) << 29)
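/* Worked example: with the default LANCE_LOG_TX_BUFFERS of 4,
   TX_RING_SIZE is 1 << 4 == 16, TX_RING_MOD_MASK is 0x0f, and
   TX_RING_LEN_BITS is 4 << 29 == 0x80000000.  Per the databook, the
   log2 ring length occupies the top three bits of each ring-base word
   in the init block, alongside the 24-bit bus address (see
   struct lance_init_block below). */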

#define PKT_BUF_SZ		1544

/* Offsets from base I/O address. */
#define LANCE_DATA 0x10
#define LANCE_ADDR 0x12
#define LANCE_RESET 0x14
#define LANCE_BUS_IF 0x16
#define LANCE_TOTAL_SIZE 0x18
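/* On NE2100-class boards the first 16 bytes of this region hold the
   station-address PROM; LANCE_ADDR is the chip's RAP (register address
   port) and LANCE_DATA its RDP (register data port), so each CSR access
   below is a RAP write selecting the CSR followed by an RDP access. */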

#define TX_TIMEOUT	(HZ/5)

/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
	s32 base;
	s16 buf_length;			/* This length is 2s complement (negative)! */
	s16 msg_length;			/* This length is "normal". */
};

struct lance_tx_head {
	s32 base;
	s16 length;				/* Length is 2s complement (negative)! */
	s16 misc;
};
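/* Example of the two's-complement length convention used throughout:
   a maximum-sized 1514-byte frame is stored in 'length' as -1514,
   which is 0xfa16 in the 16-bit field (the databook's BCNT encoding). */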

/* The LANCE initialization block, described in databook. */
struct lance_init_block {
	u16 mode;		/* Pre-set mode (reg. 15) */
	u8  phys_addr[6]; /* Physical ethernet address */
	u32 filter[2];			/* Multicast filter (unused). */
	/* Receive and transmit ring base, along with extra bits. */
	u32  rx_ring;			/* Tx and Rx ring base pointers */
	u32  tx_ring;
};
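/* Layout note: 'mode' mirrors CSR15; rx_ring and tx_ring each pack a
   24-bit ISA bus address in the low bits and the log2 ring length
   ({RX,TX}_RING_LEN_BITS) in the top three bits, which is why they are
   built as (isa_virt_to_bus(ring) & 0xffffff) | *_RING_LEN_BITS below. */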

struct lance_private {
	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_head rx_ring[RX_RING_SIZE];
	struct lance_tx_head tx_ring[TX_RING_SIZE];
	struct lance_init_block	init_block;
	const char *name;
	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	/* The addresses of receive-in-place skbuffs. */
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	unsigned long rx_buffs;		/* Address of Rx and Tx buffers. */
	/* Tx low-memory "bounce buffer" address. */
	char (*tx_bounce_buffs)[PKT_BUF_SZ];
	int cur_rx, cur_tx;			/* The next free ring entry */
	int dirty_rx, dirty_tx;		/* The ring entries to be free()ed. */
	int dma;
	unsigned char chip_version;	/* See lance_chip_type. */
	spinlock_t devlock;
};

#define LANCE_MUST_PAD          0x00000001
#define LANCE_ENABLE_AUTOSELECT 0x00000002
#define LANCE_MUST_REINIT_RING  0x00000004
#define LANCE_MUST_UNRESET      0x00000008
#define LANCE_HAS_MISSED_FRAME  0x00000010

/* A mapping from the chip ID number to the part number and features.
   These are from the datasheets -- in real life the '970 version
   reportedly has the same ID as the '965. */
static struct lance_chip_type {
	int id_number;
	const char *name;
	int flags;
} chip_table[] = {
	{0x0000, "LANCE 7990",				/* Ancient lance chip.  */
		LANCE_MUST_PAD + LANCE_MUST_UNRESET},
	{0x0003, "PCnet/ISA 79C960",		/* 79C960 PCnet/ISA.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2260, "PCnet/ISA+ 79C961",		/* 79C961 PCnet/ISA+, Plug-n-Play.  */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2420, "PCnet/PCI 79C970",		/* 79C970 or 79C974 PCnet-SCSI, PCI. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	/* Bug: the PCnet/PCI actually uses the PCnet/VLB ID number, so just call
		it the PCnet32. */
	{0x2430, "PCnet32",					/* 79C965 PCnet for VL bus. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x2621, "PCnet/PCI-II 79C970A",	/* 79C970A PCnet/PCI II. */
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
	{0x0,	 "PCnet (unknown)",
		LANCE_ENABLE_AUTOSELECT + LANCE_MUST_REINIT_RING +
			LANCE_HAS_MISSED_FRAME},
};

enum {OLD_LANCE = 0, PCNET_ISA=1, PCNET_ISAP=2, PCNET_PCI=3, PCNET_VLB=4, PCNET_PCI_II=5, LANCE_UNKNOWN=6};


/* Non-zero if lance_probe1() needs to allocate low-memory bounce buffers.
   Assume yes until we know the memory size. */
static unsigned char lance_need_isa_bounce_buffers = 1;

static int lance_open(struct net_device *dev);
static void lance_init_ring(struct net_device *dev, gfp_t mode);
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static int lance_rx(struct net_device *dev);
static irqreturn_t lance_interrupt(int irq, void *dev_id);
static int lance_close(struct net_device *dev);
static struct net_device_stats *lance_get_stats(struct net_device *dev);
static void set_multicast_list(struct net_device *dev);
static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);



#ifdef MODULE
#define MAX_CARDS		8	/* Max number of interfaces (cards) per module */

static struct net_device *dev_lance[MAX_CARDS];
static int io[MAX_CARDS];
static int dma[MAX_CARDS];
static int irq[MAX_CARDS];

module_param_hw_array(io, int, ioport, NULL, 0);
module_param_hw_array(dma, int, dma, NULL, 0);
module_param_hw_array(irq, int, irq, NULL, 0);
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(io, "LANCE/PCnet I/O base address(es), required");
MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)");
MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)");
MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)");

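/* Example module load for two jumpered ISA cards (the values here are
   illustrative; io= is required, while irq= and dma= may be 0 to
   autoprobe):

       modprobe lance io=0x300,0x320 irq=5,0 dma=5,0
*/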
int __init init_module(void)
{
	struct net_device *dev;
	int this_dev, found = 0;

	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
		if (io[this_dev] == 0)  {
			if (this_dev != 0) /* only complain once */
				break;
			printk(KERN_NOTICE "lance.c: Module autoprobing not allowed. Append \"io=0xNNN\" value(s).\n");
			return -EPERM;
		}
		dev = alloc_etherdev(0);
		if (!dev)
			break;
		dev->irq = irq[this_dev];
		dev->base_addr = io[this_dev];
		dev->dma = dma[this_dev];
		if (do_lance_probe(dev) == 0) {
			dev_lance[found++] = dev;
			continue;
		}
		free_netdev(dev);
		break;
	}
	if (found != 0)
		return 0;
	return -ENXIO;
}

static void cleanup_card(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	if (dev->dma != 4)
		free_dma(dev->dma);
	release_region(dev->base_addr, LANCE_TOTAL_SIZE);
	kfree(lp->tx_bounce_buffs);
	kfree((void*)lp->rx_buffs);
	kfree(lp);
}

void __exit cleanup_module(void)
{
	int this_dev;

	for (this_dev = 0; this_dev < MAX_CARDS; this_dev++) {
		struct net_device *dev = dev_lance[this_dev];
		if (dev) {
			unregister_netdev(dev);
			cleanup_card(dev);
			free_netdev(dev);
		}
	}
}
#endif /* MODULE */
MODULE_LICENSE("GPL");


/* Starting with v2.1.*, the LANCE/PCnet probe is similar to the other
   board probes, since kmalloc() can now allocate ISA DMA-able regions.
   This also allows the LANCE driver to be used as a module.
   */
static int __init do_lance_probe(struct net_device *dev)
{
	unsigned int *port;
	int result;

	if (high_memory <= phys_to_virt(16*1024*1024))
		lance_need_isa_bounce_buffers = 0;

	for (port = lance_portlist; *port; port++) {
		int ioaddr = *port;
		struct resource *r = request_region(ioaddr, LANCE_TOTAL_SIZE,
							"lance-probe");

		if (r) {
			/* Detect the card with minimal I/O reads */
			char offset14 = inb(ioaddr + 14);
			int card;
			for (card = 0; card < NUM_CARDS; ++card)
				if (cards[card].id_offset14 == offset14)
					break;
			if (card < NUM_CARDS) {/*yes, the first byte matches*/
				char offset15 = inb(ioaddr + 15);
				for (card = 0; card < NUM_CARDS; ++card)
					if ((cards[card].id_offset14 == offset14) &&
						(cards[card].id_offset15 == offset15))
						break;
			}
			if (card < NUM_CARDS) { /*Signature OK*/
				result = lance_probe1(dev, ioaddr, 0, 0);
				if (!result) {
					struct lance_private *lp = dev->ml_priv;
					int ver = lp->chip_version;

					r->name = chip_table[ver].name;
					return 0;
				}
			}
			release_region(ioaddr, LANCE_TOTAL_SIZE);
		}
	}
	return -ENODEV;
}

#ifndef MODULE
struct net_device * __init lance_probe(int unit)
{
	struct net_device *dev = alloc_etherdev(0);
	int err;

	if (!dev)
		return ERR_PTR(-ENODEV);

	sprintf(dev->name, "eth%d", unit);
	netdev_boot_setup_check(dev);

	err = do_lance_probe(dev);
	if (err)
		goto out;
	return dev;
out:
	free_netdev(dev);
	return ERR_PTR(err);
}
#endif

static const struct net_device_ops lance_netdev_ops = {
	.ndo_open 		= lance_open,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_stop		= lance_close,
	.ndo_get_stats		= lance_get_stats,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_mac_address 	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
{
	struct lance_private *lp;
	unsigned long dma_channels;	/* Mark spuriously-busy DMA channels */
	int i, reset_val, lance_version;
	const char *chipname;
	/* Flags for specific chips or boards. */
	unsigned char hpJ2405A = 0;	/* HP ISA adaptor */
	int hp_builtin = 0;		/* HP on-board ethernet. */
	static int did_version;		/* Already printed version info. */
	unsigned long flags;
	int err = -ENOMEM;
	void __iomem *bios;

	/* First we look for special cases.
	   Check for HP's on-board ethernet by looking for 'HP' in the BIOS.
	   There are two HP versions, check the BIOS for the configuration port.
	   This method provided by L. Julliard, Laurent_Julliard@grenoble.hp.com.
	   */
	bios = ioremap(0xf00f0, 0x14);
	if (!bios)
		return -ENOMEM;
	if (readw(bios + 0x12) == 0x5048)  {
		static const short ioaddr_table[] = { 0x300, 0x320, 0x340, 0x360};
		int hp_port = (readl(bios + 1) & 1)  ? 0x499 : 0x99;
		/* We can have boards other than the built-in!  Verify this is on-board. */
		if ((inb(hp_port) & 0xc0) == 0x80 &&
		    ioaddr_table[inb(hp_port) & 3] == ioaddr)
			hp_builtin = hp_port;
	}
	iounmap(bios);
	/* We also recognize the HP Vectra on-board here, but check below. */
	hpJ2405A = (inb(ioaddr) == 0x08 && inb(ioaddr+1) == 0x00 &&
		    inb(ioaddr+2) == 0x09);

	/* Reset the LANCE. */
	reset_val = inw(ioaddr+LANCE_RESET); /* Reset the LANCE */

	/* The un-reset is only needed for the real NE2100, and will
	   confuse the HP board. */
	if (!hpJ2405A)
		outw(reset_val, ioaddr+LANCE_RESET);

	outw(0x0000, ioaddr+LANCE_ADDR); /* Switch to window 0 */
	if (inw(ioaddr+LANCE_DATA) != 0x0004)
		return -ENODEV;

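	/* On the newer PCnet parts the RAP register reads back, and (per the
	   databook) CSR88/CSR89 hold the 32-bit part ID; the original 7990
	   has neither, which is what the failed RAP read-back below detects. */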
	/* Get the version of the chip. */
	outw(88, ioaddr+LANCE_ADDR);
	if (inw(ioaddr+LANCE_ADDR) != 88) {
		lance_version = 0;
	} else {			/* Good, it's a newer chip. */
		int chip_version = inw(ioaddr+LANCE_DATA);
		outw(89, ioaddr+LANCE_ADDR);
		chip_version |= inw(ioaddr+LANCE_DATA) << 16;
		if (lance_debug > 2)
			printk("  LANCE chip version is %#x.\n", chip_version);
		if ((chip_version & 0xfff) != 0x003)
			return -ENODEV;
		chip_version = (chip_version >> 12) & 0xffff;
		for (lance_version = 1; chip_table[lance_version].id_number; lance_version++) {
			if (chip_table[lance_version].id_number == chip_version)
				break;
		}
	}

	/* We can't allocate the private data from alloc_etherdev() because it
	   must be in an ISA DMA-able region. */
	chipname = chip_table[lance_version].name;
	printk("%s: %s at %#3x, ", dev->name, chipname, ioaddr);

	/* There is a 16 byte station address PROM at the base address.
	   The first six bytes are the station address. */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = inb(ioaddr + i);
	printk("%pM", dev->dev_addr);

	dev->base_addr = ioaddr;
	/* Make certain the data structures used by the LANCE are aligned and DMAble. */

	lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
	if (!lp)
		return -ENOMEM;
	if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
	dev->ml_priv = lp;
	lp->name = chipname;
	lp->rx_buffs = (unsigned long)kmalloc_array(RX_RING_SIZE, PKT_BUF_SZ,
						    GFP_DMA | GFP_KERNEL);
	if (!lp->rx_buffs)
		goto out_lp;
	if (lance_need_isa_bounce_buffers) {
		lp->tx_bounce_buffs = kmalloc_array(TX_RING_SIZE, PKT_BUF_SZ,
						    GFP_DMA | GFP_KERNEL);
		if (!lp->tx_bounce_buffs)
			goto out_rx;
	} else
		lp->tx_bounce_buffs = NULL;

	lp->chip_version = lance_version;
	spin_lock_init(&lp->devlock);

	lp->init_block.mode = 0x0003;		/* Disable Rx and Tx. */
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;

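	/* CSR1 and CSR2 take the low and high 16 bits of the init block's
	   bus address; the inw() of LANCE_ADDR after each write simply reads
	   RAP back before moving on. */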
	outw(0x0001, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);
	outw(0x0000, ioaddr+LANCE_ADDR);
	inw(ioaddr+LANCE_ADDR);

	if (irq) {					/* Set iff PCI card. */
		dev->dma = 4;			/* Native bus-master, no DMA channel needed. */
		dev->irq = irq;
	} else if (hp_builtin) {
		static const char dma_tbl[4] = {3, 5, 6, 0};
		static const char irq_tbl[4] = {3, 4, 5, 9};
		unsigned char port_val = inb(hp_builtin);
		dev->dma = dma_tbl[(port_val >> 4) & 3];
		dev->irq = irq_tbl[(port_val >> 2) & 3];
		printk(" HP Vectra IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (hpJ2405A) {
		static const char dma_tbl[4] = {3, 5, 6, 7};
		static const char irq_tbl[8] = {3, 4, 5, 9, 10, 11, 12, 15};
		short reset_val = inw(ioaddr+LANCE_RESET);
		dev->dma = dma_tbl[(reset_val >> 2) & 3];
		dev->irq = irq_tbl[(reset_val >> 4) & 7];
		printk(" HP J2405A IRQ %d DMA %d.\n", dev->irq, dev->dma);
	} else if (lance_version == PCNET_ISAP) {		/* The plug-n-play version. */
		short bus_info;
		outw(8, ioaddr+LANCE_ADDR);
		bus_info = inw(ioaddr+LANCE_BUS_IF);
		dev->dma = bus_info & 0x07;
		dev->irq = (bus_info >> 4) & 0x0F;
	} else {
		/* The DMA channel may be passed in PARAM1. */
		if (dev->mem_start & 0x07)
			dev->dma = dev->mem_start & 0x07;
	}

	if (dev->dma == 0) {
		/* Read the DMA channel status register, so that we can avoid
		   stuck DMA channels in the DMA detection below. */
		dma_channels = ((inb(DMA1_STAT_REG) >> 4) & 0x0f) |
			(inb(DMA2_STAT_REG) & 0xf0);
	}
	err = -ENODEV;
	if (dev->irq >= 2)
		printk(" assigned IRQ %d", dev->irq);
	else if (lance_version != 0)  {	/* 7990 boards need DMA detection first. */
		unsigned long irq_mask;

		/* To auto-IRQ we enable the initialization-done and DMA error
		   interrupts. For ISA boards we get a DMA error, but VLB and PCI
		   boards will work. */
		irq_mask = probe_irq_on();

		/* Trigger an initialization just for the interrupt. */
		outw(0x0041, ioaddr+LANCE_DATA);

		mdelay(20);
		dev->irq = probe_irq_off(irq_mask);
		if (dev->irq)
			printk(", probed IRQ %d", dev->irq);
		else {
			printk(", failed to detect IRQ line.\n");
			goto out_tx;
		}

		/* Check for the initialization done bit, 0x0100, which means
		   that we don't need a DMA channel. */
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			dev->dma = 4;
	}

	if (dev->dma == 4) {
		printk(", no DMA needed.\n");
	} else if (dev->dma) {
		if (request_dma(dev->dma, chipname)) {
			printk("DMA %d allocation failed.\n", dev->dma);
			goto out_tx;
		} else
			printk(", assigned DMA %d.\n", dev->dma);
	} else {			/* OK, we have to auto-DMA. */
		for (i = 0; i < 4; i++) {
			static const char dmas[] = { 5, 6, 7, 3 };
			int dma = dmas[i];
			int boguscnt;

			/* Don't enable a permanently busy DMA channel, or the machine
			   will hang. */
			if (test_bit(dma, &dma_channels))
				continue;
			outw(0x7f04, ioaddr+LANCE_DATA); /* Clear the memory error bits. */
			if (request_dma(dma, chipname))
				continue;

			flags = claim_dma_lock();
			set_dma_mode(dma, DMA_MODE_CASCADE);
			enable_dma(dma);
			release_dma_lock(flags);

			/* Trigger an initialization. */
			outw(0x0001, ioaddr+LANCE_DATA);
			for (boguscnt = 100; boguscnt > 0; --boguscnt)
				if (inw(ioaddr+LANCE_DATA) & 0x0900)
					break;
			if (inw(ioaddr+LANCE_DATA) & 0x0100) {
				dev->dma = dma;
				printk(", DMA %d.\n", dev->dma);
				break;
			} else {
				flags = claim_dma_lock();
				disable_dma(dma);
				release_dma_lock(flags);
				free_dma(dma);
			}
		}
		if (i == 4) {			/* Failure: bail. */
			printk("DMA detection failed.\n");
			goto out_tx;
		}
	}

	if (lance_version == 0 && dev->irq == 0) {
		/* We may auto-IRQ now that we have a DMA channel. */
		/* Trigger an initialization just for the interrupt. */
		unsigned long irq_mask;

		irq_mask = probe_irq_on();
		outw(0x0041, ioaddr+LANCE_DATA);

		mdelay(40);
		dev->irq = probe_irq_off(irq_mask);
		if (dev->irq == 0) {
			printk("  Failed to detect the 7990 IRQ line.\n");
			goto out_dma;
		}
		printk("  Auto-IRQ detected IRQ%d.\n", dev->irq);
	}

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* Turn on auto-select of media (10baseT or BNC) so that the user
		   can watch the LEDs even if the board isn't opened. */
		outw(0x0002, ioaddr+LANCE_ADDR);
		/* Don't touch 10base2 power bit. */
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 0  &&  did_version++ == 0)
		printk(version);

	/* The LANCE-specific entries in the device structure. */
	dev->netdev_ops = &lance_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	err = register_netdev(dev);
	if (err)
		goto out_dma;
	return 0;
out_dma:
	if (dev->dma != 4)
		free_dma(dev->dma);
out_tx:
	kfree(lp->tx_bounce_buffs);
out_rx:
	kfree((void*)lp->rx_buffs);
out_lp:
	kfree(lp);
	return err;
}


static int
lance_open(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int i;

	if (dev->irq == 0 ||
		request_irq(dev->irq, lance_interrupt, 0, dev->name, dev)) {
		return -EAGAIN;
	}

	/* We used to allocate DMA here, but that was silly.
	   DMA lines can't be shared!  We now permanently allocate them. */

	/* Reset the LANCE */
	inw(ioaddr+LANCE_RESET);

	/* The DMA controller is used as a no-operation slave, "cascade mode". */
	if (dev->dma != 4) {
		unsigned long flags = claim_dma_lock();
		enable_dma(dev->dma);
		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
		release_dma_lock(flags);
	}

	/* Un-Reset the LANCE, needed only for the NE2100. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_UNRESET)
		outw(0, ioaddr+LANCE_RESET);

	if (chip_table[lp->chip_version].flags & LANCE_ENABLE_AUTOSELECT) {
		/* This is 79C960-specific: Turn on auto-select of media (AUI, BNC). */
		outw(0x0002, ioaddr+LANCE_ADDR);
		/* Only touch autoselect bit. */
		outw(inw(ioaddr+LANCE_BUS_IF) | 0x0002, ioaddr+LANCE_BUS_IF);
	}

	if (lance_debug > 1)
		printk("%s: lance_open() irq %d dma %d tx/rx rings %#x/%#x init %#x.\n",
			   dev->name, dev->irq, dev->dma,
			   (u32) isa_virt_to_bus(lp->tx_ring),
			   (u32) isa_virt_to_bus(lp->rx_ring),
			   (u32) isa_virt_to_bus(&lp->init_block));

	lance_init_ring(dev, GFP_KERNEL);
	/* Re-initialize the LANCE, and start it when done. */
	outw(0x0001, ioaddr+LANCE_ADDR);
	outw((short) (u32) isa_virt_to_bus(&lp->init_block), ioaddr+LANCE_DATA);
	outw(0x0002, ioaddr+LANCE_ADDR);
	outw(((u32)isa_virt_to_bus(&lp->init_block)) >> 16, ioaddr+LANCE_DATA);

	outw(0x0004, ioaddr+LANCE_ADDR);
	outw(0x0915, ioaddr+LANCE_DATA);

	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0001, ioaddr+LANCE_DATA);

	netif_start_queue (dev);

	i = 0;
	while (i++ < 100)
		if (inw(ioaddr+LANCE_DATA) & 0x0100)
			break;
	/*
	 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
	 * reports that doing so triggers a bug in the '974.
	 */
	outw(0x0042, ioaddr+LANCE_DATA);

	if (lance_debug > 2)
		printk("%s: LANCE open after %d ticks, init block %#x csr0 %4.4x.\n",
			   dev->name, i, (u32) isa_virt_to_bus(&lp->init_block), inw(ioaddr+LANCE_DATA));

	return 0;					/* Always succeed */
}

/* The LANCE has been halted for one reason or another (busmaster memory
   arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
   etc.).  Modern LANCE variants always reload their ring-buffer
   configuration when restarted, so we must reinitialize our ring
   context before restarting.  As part of this reinitialization,
   find all packets still on the Tx ring and pretend that they had been
   sent (in effect, drop the packets on the floor) - the higher-level
   protocols will time out and retransmit.  It'd be better to shuffle
   these skbs to a temp list and then actually re-Tx them after
   restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
*/

static void
lance_purge_ring(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int i;

	/* Free all the skbuffs in the Rx and Tx queues. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb = lp->rx_skbuff[i];
		lp->rx_skbuff[i] = NULL;
		lp->rx_ring[i].base = 0;		/* Not owned by LANCE chip. */
		if (skb)
			dev_kfree_skb_any(skb);
	}
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (lp->tx_skbuff[i]) {
			dev_kfree_skb_any(lp->tx_skbuff[i]);
			lp->tx_skbuff[i] = NULL;
		}
	}
}


/* Initialize the LANCE Rx and Tx rings. */
static void
lance_init_ring(struct net_device *dev, gfp_t gfp)
{
	struct lance_private *lp = dev->ml_priv;
	int i;

	lp->cur_rx = lp->cur_tx = 0;
	lp->dirty_rx = lp->dirty_tx = 0;

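	/* Each Rx descriptor below is handed to the chip by setting the OWN
	   bit (0x80000000) in the packed base word; buf_length is written as
	   a two's-complement negative count, matching the descriptor structs
	   above. */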
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;
		void *rx_buff;

		skb = alloc_skb(PKT_BUF_SZ, GFP_DMA | gfp);
		lp->rx_skbuff[i] = skb;
		if (skb)
			rx_buff = skb->data;
		else
			rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
		if (rx_buff == NULL)
			lp->rx_ring[i].base = 0;
		else
			lp->rx_ring[i].base = (u32)isa_virt_to_bus(rx_buff) | 0x80000000;
		lp->rx_ring[i].buf_length = -PKT_BUF_SZ;
	}
	/* The Tx buffer address is filled in as needed, but we do need to clear
	   the upper ownership bit. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		lp->tx_skbuff[i] = NULL;
		lp->tx_ring[i].base = 0;
	}

	lp->init_block.mode = 0x0000;
	for (i = 0; i < 6; i++)
		lp->init_block.phys_addr[i] = dev->dev_addr[i];
	lp->init_block.filter[0] = 0x00000000;
	lp->init_block.filter[1] = 0x00000000;
	lp->init_block.rx_ring = ((u32)isa_virt_to_bus(lp->rx_ring) & 0xffffff) | RX_RING_LEN_BITS;
	lp->init_block.tx_ring = ((u32)isa_virt_to_bus(lp->tx_ring) & 0xffffff) | TX_RING_LEN_BITS;
}

static void
lance_restart(struct net_device *dev, unsigned int csr0_bits, int must_reinit)
{
	struct lance_private *lp = dev->ml_priv;

	if (must_reinit ||
		(chip_table[lp->chip_version].flags & LANCE_MUST_REINIT_RING)) {
		lance_purge_ring(dev);
		lance_init_ring(dev, GFP_ATOMIC);
	}
	outw(0x0000,    dev->base_addr + LANCE_ADDR);
	outw(csr0_bits, dev->base_addr + LANCE_DATA);
}


static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
	struct lance_private *lp = (struct lance_private *) dev->ml_priv;
	int ioaddr = dev->base_addr;

	outw (0, ioaddr + LANCE_ADDR);
	printk ("%s: transmit timed out, status %4.4x, resetting.\n",
		dev->name, inw (ioaddr + LANCE_DATA));
	outw (0x0004, ioaddr + LANCE_DATA);
	dev->stats.tx_errors++;
#ifndef final_version
	if (lance_debug > 3) {
		int i;
		printk (" Ring data dump: dirty_tx %d cur_tx %d%s cur_rx %d.",
		  lp->dirty_tx, lp->cur_tx, netif_queue_stopped(dev) ? " (full)" : "",
			lp->cur_rx);
		for (i = 0; i < RX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
			 lp->rx_ring[i].base, -lp->rx_ring[i].buf_length,
				lp->rx_ring[i].msg_length);
		for (i = 0; i < TX_RING_SIZE; i++)
			printk ("%s %08x %04x %04x", i & 0x3 ? "" : "\n ",
			     lp->tx_ring[i].base, -lp->tx_ring[i].length,
				lp->tx_ring[i].misc);
		printk ("\n");
	}
#endif
	lance_restart (dev, 0x0043, 1);

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue (dev);
}


static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int ioaddr = dev->base_addr;
	int entry;
	unsigned long flags;

	spin_lock_irqsave(&lp->devlock, flags);

	if (lance_debug > 3) {
		outw(0x0000, ioaddr+LANCE_ADDR);
		printk("%s: lance_start_xmit() called, csr0 %4.4x.\n", dev->name,
			   inw(ioaddr+LANCE_DATA));
		outw(0x0000, ioaddr+LANCE_DATA);
	}

	/* Fill in a Tx ring entry */

	/* Mask to ring buffer boundary. */
	entry = lp->cur_tx & TX_RING_MOD_MASK;

	/* Caution: the write order is important here, set the base address
	   with the "ownership" bits last. */

	/* The old LANCE chips don't automatically pad buffers to the minimum size. */
	if (chip_table[lp->chip_version].flags & LANCE_MUST_PAD) {
		if (skb->len < ETH_ZLEN) {
			if (skb_padto(skb, ETH_ZLEN))
				goto out;
			lp->tx_ring[entry].length = -ETH_ZLEN;
		}
		else
			lp->tx_ring[entry].length = -skb->len;
	} else
		lp->tx_ring[entry].length = -skb->len;

	lp->tx_ring[entry].misc = 0x0000;

	dev->stats.tx_bytes += skb->len;

	/* If any part of this buffer is >16M we must copy it to a low-memory
	   buffer. */
	if ((u32)isa_virt_to_bus(skb->data) + skb->len > 0x01000000) {
		if (lance_debug > 5)
			printk("%s: bouncing a high-memory packet (%#x).\n",
				   dev->name, (u32)isa_virt_to_bus(skb->data));
		skb_copy_from_linear_data(skb, &lp->tx_bounce_buffs[entry], skb->len);
		lp->tx_ring[entry].base =
			((u32)isa_virt_to_bus((lp->tx_bounce_buffs + entry)) & 0xffffff) | 0x83000000;
		dev_kfree_skb(skb);
	} else {
		lp->tx_skbuff[entry] = skb;
		lp->tx_ring[entry].base = ((u32)isa_virt_to_bus(skb->data) & 0xffffff) | 0x83000000;
	}
	lp->cur_tx++;

	/* Trigger an immediate send poll. */
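	/* The 0x83000000 above sets OWN|STP|ENP in the descriptor flags byte:
	   the chip now owns the buffer and it holds a complete frame (start
	   and end of packet).  CSR0 <- 0x0048 = INEA|TDMD below keeps
	   interrupts enabled and demands an immediate transmit poll. */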
	outw(0x0000, ioaddr+LANCE_ADDR);
	outw(0x0048, ioaddr+LANCE_DATA);

	if ((lp->cur_tx - lp->dirty_tx) >= TX_RING_SIZE)
		netif_stop_queue(dev);

out:
	spin_unlock_irqrestore(&lp->devlock, flags);
	return NETDEV_TX_OK;
}
1018*4882a593Smuzhiyun 
1019*4882a593Smuzhiyun /* The LANCE interrupt handler. */
lance_interrupt(int irq,void * dev_id)1020*4882a593Smuzhiyun static irqreturn_t lance_interrupt(int irq, void *dev_id)
1021*4882a593Smuzhiyun {
1022*4882a593Smuzhiyun 	struct net_device *dev = dev_id;
1023*4882a593Smuzhiyun 	struct lance_private *lp;
1024*4882a593Smuzhiyun 	int csr0, ioaddr, boguscnt=10;
1025*4882a593Smuzhiyun 	int must_restart;
1026*4882a593Smuzhiyun 
1027*4882a593Smuzhiyun 	ioaddr = dev->base_addr;
1028*4882a593Smuzhiyun 	lp = dev->ml_priv;
1029*4882a593Smuzhiyun 
1030*4882a593Smuzhiyun 	spin_lock (&lp->devlock);
1031*4882a593Smuzhiyun 
	outw(0x00, dev->base_addr + LANCE_ADDR);
	while ((csr0 = inw(dev->base_addr + LANCE_DATA)) & 0x8600 &&
	       --boguscnt >= 0) {
		/* Acknowledge all of the current interrupt sources ASAP. */
		outw(csr0 & ~0x004f, dev->base_addr + LANCE_DATA);

		must_restart = 0;

		if (lance_debug > 5)
			printk("%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
				   dev->name, csr0, inw(dev->base_addr + LANCE_DATA));

		if (csr0 & 0x0400)			/* Rx interrupt */
			lance_rx(dev);

		if (csr0 & 0x0200) {		/* Tx-done interrupt */
			int dirty_tx = lp->dirty_tx;

			while (dirty_tx < lp->cur_tx) {
				int entry = dirty_tx & TX_RING_MOD_MASK;
				int status = lp->tx_ring[entry].base;

				if (status < 0)
					break;			/* It still hasn't been Txed */

				lp->tx_ring[entry].base = 0;

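				/* The high byte of 'status' is TMD1; 0x40000000 is
				   the ERR summary bit.  On error, TMD3 ('misc')
				   reports RTRY (0x0400), LCAR (0x0800), LCOL (0x1000)
				   and UFLO (0x4000), counted below as aborted /
				   carrier / window / FIFO errors. */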
				if (status & 0x40000000) {
					/* There was a major error, log it. */
					int err_status = lp->tx_ring[entry].misc;
					dev->stats.tx_errors++;
					if (err_status & 0x0400)
						dev->stats.tx_aborted_errors++;
					if (err_status & 0x0800)
						dev->stats.tx_carrier_errors++;
					if (err_status & 0x1000)
						dev->stats.tx_window_errors++;
					if (err_status & 0x4000) {
						/* Ackk!  On FIFO errors the Tx unit is turned off! */
						dev->stats.tx_fifo_errors++;
						/* Remove this verbosity later! */
						printk("%s: Tx FIFO error! Status %4.4x.\n",
							   dev->name, csr0);
						/* Restart the chip. */
						must_restart = 1;
					}
				} else {
					if (status & 0x18000000)
						dev->stats.collisions++;
					dev->stats.tx_packets++;
				}

				/* We must free the original skb if it's not a data-only copy
				   in the bounce buffer. */
				if (lp->tx_skbuff[entry]) {
					dev_consume_skb_irq(lp->tx_skbuff[entry]);
					lp->tx_skbuff[entry] = NULL;
				}
				dirty_tx++;
			}

#ifndef final_version
			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
				printk("out-of-sync dirty pointer, %d vs. %d, full=%s.\n",
					   dirty_tx, lp->cur_tx,
					   netif_queue_stopped(dev) ? "yes" : "no");
				dirty_tx += TX_RING_SIZE;
			}
#endif

			/* if the ring is no longer full, accept more packets */
			if (netif_queue_stopped(dev) &&
			    dirty_tx > lp->cur_tx - TX_RING_SIZE + 2)
				netif_wake_queue (dev);

			lp->dirty_tx = dirty_tx;
		}

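		/* In CSR0 terms these are BABL (0x4000, babble), MISS (0x1000,
		   missed frame) and MERR (0x0800, memory/bus error); only a
		   memory error is severe enough to force a restart. */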
		/* Log misc errors. */
		if (csr0 & 0x4000)
			dev->stats.tx_errors++; /* Tx babble. */
		if (csr0 & 0x1000)
			dev->stats.rx_errors++; /* Missed a Rx frame. */
		if (csr0 & 0x0800) {
			printk("%s: Bus master arbitration failure, status %4.4x.\n",
				   dev->name, csr0);
			/* Restart the chip. */
			must_restart = 1;
		}

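		/* The restart path writes STOP (0x0004) to CSR0 to halt the
		   chip and clear the error condition; lance_restart() then
		   re-initializes it and sets STRT (0x0002). */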
		if (must_restart) {
			/* stop the chip to clear the error condition, then restart */
			outw(0x0000, dev->base_addr + LANCE_ADDR);
			outw(0x0004, dev->base_addr + LANCE_DATA);
			lance_restart(dev, 0x0002, 0);
		}
	}

	/* Clear any other interrupt, and set interrupt enable. */
	outw(0x0000, dev->base_addr + LANCE_ADDR);
	outw(0x7940, dev->base_addr + LANCE_DATA);
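	/* 0x7940 = BABL|CERR|MISS|MERR|IDON|INEA: clear the remaining
	   latched status bits and re-enable interrupts. */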

	if (lance_debug > 4)
		printk("%s: exiting interrupt, csr%d=%#4.4x.\n",
			   dev->name, inw(ioaddr + LANCE_ADDR),
			   inw(dev->base_addr + LANCE_DATA));

	spin_unlock (&lp->devlock);
	return IRQ_HANDLED;
}

static int
lance_rx(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;
	int entry = lp->cur_rx & RX_RING_MOD_MASK;
	int i;

	/* If we own the next entry, it's a new packet. Send it up. */
	while (lp->rx_ring[entry].base >= 0) {
		int status = lp->rx_ring[entry].base >> 24;

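		/* RMD1 occupies the top byte of 'base', so OWN (0x80) is the
		   sign bit: 'base >= 0' means the host owns the entry.  A
		   status of 0x03 (STP|ENP) is a clean single-buffer packet;
		   anything else carries error bits. */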
		if (status != 0x03) {			/* There was an error. */
			/* There is a tricky error noted by John Murphy,
			   <murf@perftech.com> to Russ Nelson: Even with full-sized
			   buffers it's possible for a jabber packet to use two
			   buffers, with only the last correctly noting the error. */
			if (status & 0x01)	/* Only count a general error at the */
				dev->stats.rx_errors++; /* end of a packet.*/
			if (status & 0x20)
				dev->stats.rx_frame_errors++;
			if (status & 0x10)
				dev->stats.rx_over_errors++;
			if (status & 0x08)
				dev->stats.rx_crc_errors++;
			if (status & 0x04)
				dev->stats.rx_fifo_errors++;
			lp->rx_ring[entry].base &= 0x03ffffff;
		}
		else
		{
			/* Malloc up new buffer, compatible with net3. */
			short pkt_len = (lp->rx_ring[entry].msg_length & 0xfff)-4;
			struct sk_buff *skb;

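			/* msg_length (RMD3 MCNT) includes the 4-byte FCS from
			   the wire, hence the -4 above. */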
			if(pkt_len<60)
			{
				printk("%s: Runt packet!\n",dev->name);
				dev->stats.rx_errors++;
			}
			else
			{
				skb = dev_alloc_skb(pkt_len+2);
				if (skb == NULL)
				{
					printk("%s: Memory squeeze, deferring packet.\n", dev->name);
					for (i=0; i < RX_RING_SIZE; i++)
						if (lp->rx_ring[(entry+i) & RX_RING_MOD_MASK].base < 0)
							break;

					if (i > RX_RING_SIZE - 2)
					{
						dev->stats.rx_dropped++;
						lp->rx_ring[entry].base |= 0x80000000;
						lp->cur_rx++;
					}
					break;
				}
				skb_reserve(skb,2);	/* align the IP header on a longword boundary */
				skb_put(skb,pkt_len);	/* Make room */
				skb_copy_to_linear_data(skb,
					(unsigned char *)isa_bus_to_virt((lp->rx_ring[entry].base & 0x00ffffff)),
					pkt_len);
				skb->protocol=eth_type_trans(skb,dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		}
		/* The docs say that the buffer length isn't touched, but Andrew Boyd
		   of QNX reports that some revs of the 79C965 clear it. */
		lp->rx_ring[entry].buf_length = -PKT_BUF_SZ;
		lp->rx_ring[entry].base |= 0x80000000;
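		/* Setting bit 31 (OWN) hands the descriptor back to the LANCE
		   for reuse. */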
		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
	}

	/* We should check that at least two ring entries are free.  If not,
	   we should free one and mark stats->rx_dropped++. */

	return 0;
}

static int
lance_close(struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct lance_private *lp = dev->ml_priv;

	netif_stop_queue (dev);

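	/* On chips flagged LANCE_HAS_MISSED_FRAME, CSR112 is the missed
	   frame counter; latch it into the stats before stopping the chip. */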
	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
		outw(112, ioaddr+LANCE_ADDR);
		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
	}
	outw(0, ioaddr+LANCE_ADDR);

	if (lance_debug > 1)
		printk("%s: Shutting down ethercard, status was %2.2x.\n",
			   dev->name, inw(ioaddr+LANCE_DATA));

	/* We stop the LANCE here -- it occasionally polls
	   memory if we don't. */
	outw(0x0004, ioaddr+LANCE_DATA);

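	/* DMA channel 4 is the ISA cascade channel; it appears to serve
	   here as a sentinel for "no ISA DMA channel allocated", so only
	   real channels are disabled. */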
	if (dev->dma != 4)
	{
		unsigned long flags=claim_dma_lock();
		disable_dma(dev->dma);
		release_dma_lock(flags);
	}
	free_irq(dev->irq, dev);

	lance_purge_ring(dev);

	return 0;
}

static struct net_device_stats *lance_get_stats(struct net_device *dev)
{
	struct lance_private *lp = dev->ml_priv;

	if (chip_table[lp->chip_version].flags & LANCE_HAS_MISSED_FRAME) {
		short ioaddr = dev->base_addr;
		short saved_addr;
		unsigned long flags;

		spin_lock_irqsave(&lp->devlock, flags);
		saved_addr = inw(ioaddr+LANCE_ADDR);
		outw(112, ioaddr+LANCE_ADDR);
		dev->stats.rx_missed_errors = inw(ioaddr+LANCE_DATA);
		outw(saved_addr, ioaddr+LANCE_ADDR);
		spin_unlock_irqrestore(&lp->devlock, flags);
	}

	return &dev->stats;
}

/* Set or clear the multicast filter for this adaptor.
 */

static void set_multicast_list(struct net_device *dev)
{
	short ioaddr = dev->base_addr;

	outw(0, ioaddr+LANCE_ADDR);
	outw(0x0004, ioaddr+LANCE_DATA); /* Temporarily stop the lance. */

	if (dev->flags&IFF_PROMISC) {
		outw(15, ioaddr+LANCE_ADDR);
		outw(0x8000, ioaddr+LANCE_DATA); /* Set promiscuous mode */
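		/* CSR15 is the mode register; bit 15 (PROM) enables
		   promiscuous reception. */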
	} else {
		short multicast_table[4];
		int i;
		int num_addrs=netdev_mc_count(dev);
		if(dev->flags&IFF_ALLMULTI)
			num_addrs=1;
		/* FIXIT: We don't use the multicast table, but rely on upper-layer filtering. */
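		/* CSR8-CSR11 hold the 64-bit logical-address (multicast hash)
		   filter: all-ones accepts every multicast frame, all-zeroes
		   rejects them all. */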
		memset(multicast_table, (num_addrs == 0) ? 0 : -1, sizeof(multicast_table));
		for (i = 0; i < 4; i++) {
			outw(8 + i, ioaddr+LANCE_ADDR);
			outw(multicast_table[i], ioaddr+LANCE_DATA);
		}
		outw(15, ioaddr+LANCE_ADDR);
		outw(0x0000, ioaddr+LANCE_DATA); /* Unset promiscuous mode */
	}

	lance_restart(dev, 0x0142, 0); /* Resume normal operation */
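	/* 0x0142 = IDON|INEA|STRT: acknowledge init-done, re-enable
	   interrupts and start the chip. */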

}