/* atarilance.c: Ethernet driver for VME Lance cards on the Atari */
/*
	Written 1995/96 by Roman Hodek (Roman.Hodek@informatik.uni-erlangen.de)

	This software may be used and distributed according to the terms
	of the GNU General Public License, incorporated herein by reference.

	This driver was written with the following sources of reference:
	 - The driver for the Riebl Lance card by the TU Vienna.
	 - The modified TUW driver for PAM's VME cards
	 - The PC-Linux driver for Lance cards (but this is for bus master
	   cards, not the shared memory ones)
	 - The Amiga Ariadne driver

	v1.0: (in 1.2.13pl4/0.9.13)
	      Initial version
	v1.1: (in 1.2.13pl5)
	      more comments
	      deleted some debugging stuff
	      optimized register access (keep AREG pointing to CSR0)
	      following AMD, CSR0_STRT should be set only after IDON is detected
	      use memcpy() for data transfers, that also employs long word moves
	      better probe procedure for 24-bit systems
	      non-VME-RieblCards need extra delays in memcpy
	      must also do write test, since 0xfxe00000 may hit ROM
	      use 8/32 tx/rx buffers, which should give better NFS performance;
	        this is made possible by shifting the last packet buffer after the
	        RieblCard reserved area
	v1.2: (in 1.2.13pl8)
	      again fixed probing for the Falcon; 0xfe010000 hits phys. 0x00010000
	      and thus RAM, so in case no Lance is found all memory contents have
	      to be restored!
	      Now possible to compile as module.
	v1.3: 03/30/96 Jes Sorensen, Roman (in 1.3)
	      Several little 1.3 adaptations
	      When the lance is stopped it jumps back into little-endian
	      mode. It is therefore necessary to put it back where it
	      belongs, in big endian mode, in order to make things work.
	      This might be the reason why multicast-mode didn't work
	      before, but I'm not able to test it as I only got an Amiga
	      (we had similar problems with the A2065 driver).

*/

static const char version[] = "atarilance.c: v1.3 04/04/96 "
			      "Roman.Hodek@informatik.uni-erlangen.de\n";

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/bitops.h>

#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/atarihw.h>
#include <asm/atariints.h>
#include <asm/io.h>

/* Debug level:
 *  0 = silent, print only serious errors
 *  1 = normal, print error messages
 *  2 = debug, print debug info
 *  3 = debug, print even more debug info (packet data)
 */

#define	LANCE_DEBUG	1

#ifdef LANCE_DEBUG
static int lance_debug = LANCE_DEBUG;
#else
static int lance_debug = 1;
#endif
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)");
MODULE_LICENSE("GPL");
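
/* Usage sketch: when built as a module, the debug level can be set at load
 * time, e.g.
 *
 *	modprobe atarilance lance_debug=2
 *
 * (With perm 0 above, the parameter is not exported via sysfs, so it cannot
 * be changed that way after loading.)
 */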

/* Print debug messages on probing? */
#undef LANCE_DEBUG_PROBE

#define	DPRINTK(n,a)							\
	do {										\
		if (lance_debug >= n)					\
			printk a;							\
	} while( 0 )
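
/* DPRINTK takes the printk() arguments as one extra-parenthesized argument,
 * e.g. (as used throughout this driver):
 *
 *	DPRINTK( 2, ( "%s: csr0=%04x\n", dev->name, DREG ));
 *
 * which prints only if lance_debug >= 2.
 */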

#ifdef LANCE_DEBUG_PROBE
# define PROBE_PRINT(a)	printk a
#else
# define PROBE_PRINT(a)
#endif

/* These define the number of Rx and Tx buffers as log2. (Only powers
 * of two are valid.)
 * Many more rx buffers (32) are reserved than tx buffers (8), since receiving
 * is more time critical than sending and packets may have to remain in the
 * board's memory when main memory is low.
 */

#define TX_LOG_RING_SIZE			3
#define RX_LOG_RING_SIZE			5

/* These are the derived values */

#define TX_RING_SIZE			(1 << TX_LOG_RING_SIZE)
#define TX_RING_LEN_BITS		(TX_LOG_RING_SIZE << 5)
#define	TX_RING_MOD_MASK		(TX_RING_SIZE - 1)

#define RX_RING_SIZE			(1 << RX_LOG_RING_SIZE)
#define RX_RING_LEN_BITS		(RX_LOG_RING_SIZE << 5)
#define	RX_RING_MOD_MASK		(RX_RING_SIZE - 1)
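
/* Worked example of the encoding above (derived from the macros only):
 * TX_LOG_RING_SIZE = 3  ->  TX_RING_SIZE = 8,  TX_RING_LEN_BITS = 3 << 5 = 0x60
 * RX_LOG_RING_SIZE = 5  ->  RX_RING_SIZE = 32, RX_RING_LEN_BITS = 5 << 5 = 0xa0
 * The *_LEN_BITS values are what gets stored in the 'len' byte of the ring
 * descriptors in the init block below (MEM->init.rx_ring.len / tx_ring.len),
 * i.e. the log2 ring size shifted into the upper bits of that byte.
 */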
#define TX_TIMEOUT	(HZ/5)

/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
	unsigned short			base;		/* Low word of base addr */
	volatile unsigned char	flag;
	unsigned char			base_hi;	/* High word of base addr (unused) */
	short					buf_length;	/* This length is 2s complement! */
	volatile short			msg_length;	/* This length is "normal". */
};

struct lance_tx_head {
	unsigned short			base;		/* Low word of base addr */
	volatile unsigned char	flag;
	unsigned char			base_hi;	/* High word of base addr (unused) */
	short					length;		/* Length is 2s complement! */
	volatile short			misc;
};
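
/* Note on the "2s complement" lengths: the buffer length is written as a
 * negative number, e.g. lance_init_ring() below stores -PKT_BUF_SZ (-1544,
 * i.e. 0xf9f8 as an unsigned 16-bit value) into buf_length, and
 * lance_start_xmit() stores -len into length.  msg_length, filled in by the
 * chip on receive, is an ordinary positive count.
 */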

struct ringdesc {
	unsigned short	adr_lo;		/* Low 16 bits of address */
	unsigned char	len;		/* Length bits */
	unsigned char	adr_hi;		/* High 8 bits of address (unused) */
};

/* The LANCE initialization block, described in databook. */
struct lance_init_block {
	unsigned short	mode;		/* Pre-set mode */
	unsigned char	hwaddr[6];	/* Physical ethernet address */
	unsigned		filter[2];	/* Multicast filter (unused). */
	/* Receive and transmit ring base, along with length bits. */
	struct ringdesc	rx_ring;
	struct ringdesc	tx_ring;
};

/* The whole layout of the Lance shared memory */
struct lance_memory {
	struct lance_init_block	init;
	struct lance_tx_head	tx_head[TX_RING_SIZE];
	struct lance_rx_head	rx_head[RX_RING_SIZE];
	char					packet_area[];	/* packet data follow after the
											 * init block and the ring
											 * descriptors and are located
											 * at runtime */
};

/* RieblCard specifics:
 * The original TOS driver for these cards reserves the area from offset
 * 0xee70 to 0xeebb for storing configuration data. Of interest to us is the
 * Ethernet address there, and the magic for verifying the data's validity.
 * The reserved area isn't touched by packet buffers. Furthermore, offset
 * 0xfffe is reserved for the interrupt vector number.
 */
#define	RIEBL_RSVD_START	0xee70
#define	RIEBL_RSVD_END		0xeec0
#define RIEBL_MAGIC			0x09051990
#define RIEBL_MAGIC_ADDR	((unsigned long *)(((char *)MEM) + 0xee8a))
#define RIEBL_HWADDR_ADDR	((unsigned char *)(((char *)MEM) + 0xee8e))
#define RIEBL_IVEC_ADDR		((unsigned short *)(((char *)MEM) + 0xfffe))
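
/* Sketch of how these are used further down: lance_probe1() treats the card
 * as a battery-backed ("new") RieblCard only if the stored magic matches,
 *
 *	if (*RIEBL_MAGIC_ADDR == RIEBL_MAGIC)
 *		lp->cardtype = NEW_RIEBL;
 *
 * and lance_set_mac_address() writes the magic back after storing a new
 * hardware address, so the address is considered valid in future sessions.
 */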

/* This is a default address for the old RieblCards without a battery
 * that have no ethernet address at boot time. 00:00:36:04 is the
 * prefix for Riebl cards, the 00:00 at the end is arbitrary.
 */

static unsigned char OldRieblDefHwaddr[6] = {
	0x00, 0x00, 0x36, 0x04, 0x00, 0x00
};


/* I/O registers of the Lance chip */

struct lance_ioreg {
/* base+0x0 */	volatile unsigned short	data;
/* base+0x2 */	volatile unsigned short	addr;
				unsigned char			_dummy1[3];
/* base+0x7 */	volatile unsigned char	ivec;
				unsigned char			_dummy2[5];
/* base+0xd */	volatile unsigned char	eeprom;
				unsigned char			_dummy3;
/* base+0xf */	volatile unsigned char	mem;
};

/* Types of boards this driver supports */

enum lance_type {
	OLD_RIEBL,		/* old Riebl card without battery */
	NEW_RIEBL,		/* new Riebl card with battery */
	PAM_CARD		/* PAM card with EEPROM */
};

static char *lance_names[] = {
	"Riebl-Card (without battery)",
	"Riebl-Card (with battery)",
	"PAM intern card"
};

/* The driver's private device structure */

struct lance_private {
	enum lance_type		cardtype;
	struct lance_ioreg	*iobase;
	struct lance_memory	*mem;
	int		 	cur_rx, cur_tx;	/* The next free ring entry */
	int			dirty_tx;		/* Ring entries to be freed. */
				/* copy function */
	void			*(*memcpy_f)( void *, const void *, size_t );
/* This must be long for set_bit() */
	long			tx_full;
	spinlock_t		devlock;
};

/* I/O register access macros */

#define	MEM		lp->mem
#define	DREG	IO->data
#define	AREG	IO->addr
#define	REGA(a)	(*( AREG = (a), &DREG ))

/* Definitions for packet buffer access: */
#define PKT_BUF_SZ		1544
/* Get the address of a packet buffer corresponding to a given buffer head */
#define	PKTBUF_ADDR(head)	(((unsigned char *)(MEM)) + (head)->base)
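
/* Example of the register access idiom (both lp and IO must be in scope):
 *
 *	REGA( CSR0 ) = CSR0_STOP;
 *
 * first loads the address register and then accesses the data register, i.e.
 * it is equivalent to
 *
 *	AREG = CSR0;
 *	DREG = CSR0_STOP;
 *
 * After that, plain DREG reads/writes keep hitting CSR0 until AREG is changed
 * again, which is why the fast paths below set AREG = CSR0 once and then use
 * DREG directly.
 */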

/* Possible memory/IO addresses for probing */

static struct lance_addr {
	unsigned long	memaddr;
	unsigned long	ioaddr;
	int				slow_flag;
} lance_addr_list[] = {
	{ 0xfe010000, 0xfe00fff0, 0 },	/* RieblCard VME in TT */
	{ 0xffc10000, 0xffc0fff0, 0 },	/* RieblCard VME in MegaSTE
									   (highest byte stripped) */
	{ 0xffe00000, 0xffff7000, 1 },	/* RieblCard in ST
									   (highest byte stripped) */
	{ 0xffd00000, 0xffff7000, 1 },	/* RieblCard in ST with hw modif. to
									   avoid conflict with ROM
									   (highest byte stripped) */
	{ 0xffcf0000, 0xffcffff0, 0 },	/* PAMCard VME in TT and MSTE
									   (highest byte stripped) */
	{ 0xfecf0000, 0xfecffff0, 0 },	/* Rhotron's PAMCard VME in TT and MSTE
									   (highest byte stripped) */
};
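
/* "Highest byte stripped" presumably refers to machines that only decode 24
 * address bits, so e.g. the MegaSTE entry 0xffc10000 would effectively be
 * seen as 0x00c10000; the 32-bit values above are what the CPU uses for the
 * access.  (Assumption based on the 24-bit probe handling mentioned in the
 * changelog, not on the card documentation.)
 */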

#define	N_LANCE_ADDR	ARRAY_SIZE(lance_addr_list)


/* Definitions for the Lance */

/* tx_head flags */
#define TMD1_ENP		0x01	/* end of packet */
#define TMD1_STP		0x02	/* start of packet */
#define TMD1_DEF		0x04	/* deferred */
#define TMD1_ONE		0x08	/* one retry needed */
#define TMD1_MORE		0x10	/* more than one retry needed */
#define TMD1_ERR		0x40	/* error summary */
#define TMD1_OWN 		0x80	/* ownership (set: chip owns) */

#define TMD1_OWN_CHIP	TMD1_OWN
#define TMD1_OWN_HOST	0

/* tx_head misc field */
#define TMD3_TDR		0x03FF	/* Time Domain Reflectometry counter */
#define TMD3_RTRY		0x0400	/* failed after 16 retries */
#define TMD3_LCAR		0x0800	/* carrier lost */
#define TMD3_LCOL		0x1000	/* late collision */
#define TMD3_UFLO		0x4000	/* underflow (late memory) */
#define TMD3_BUFF		0x8000	/* buffering error (no ENP) */

/* rx_head flags */
#define RMD1_ENP		0x01	/* end of packet */
#define RMD1_STP		0x02	/* start of packet */
#define RMD1_BUFF		0x04	/* buffer error */
#define RMD1_CRC		0x08	/* CRC error */
#define RMD1_OFLO		0x10	/* overflow */
#define RMD1_FRAM		0x20	/* framing error */
#define RMD1_ERR		0x40	/* error summary */
#define RMD1_OWN 		0x80	/* ownership (set: chip owns) */

#define RMD1_OWN_CHIP	RMD1_OWN
#define RMD1_OWN_HOST	0

/* register names */
#define CSR0	0		/* mode/status */
#define CSR1	1		/* init block addr (low) */
#define CSR2	2		/* init block addr (high) */
#define CSR3	3		/* misc */
#define CSR8	8	  	/* address filter */
#define CSR15	15		/* promiscuous mode */

/* CSR0 */
/* (R=readable, W=writeable, S=set on write, C=clear on write) */
#define CSR0_INIT	0x0001		/* initialize (RS) */
#define CSR0_STRT	0x0002		/* start (RS) */
#define CSR0_STOP	0x0004		/* stop (RS) */
#define CSR0_TDMD	0x0008		/* transmit demand (RS) */
#define CSR0_TXON	0x0010		/* transmitter on (R) */
#define CSR0_RXON	0x0020		/* receiver on (R) */
#define CSR0_INEA	0x0040		/* interrupt enable (RW) */
#define CSR0_INTR	0x0080		/* interrupt active (R) */
#define CSR0_IDON	0x0100		/* initialization done (RC) */
#define CSR0_TINT	0x0200		/* transmitter interrupt (RC) */
#define CSR0_RINT	0x0400		/* receiver interrupt (RC) */
#define CSR0_MERR	0x0800		/* memory error (RC) */
#define CSR0_MISS	0x1000		/* missed frame (RC) */
#define CSR0_CERR	0x2000		/* carrier error (no heartbeat :-) (RC) */
#define CSR0_BABL	0x4000		/* babble: tx-ed too many bits (RC) */
#define CSR0_ERR	0x8000		/* error (RC) */

/* CSR3 */
#define CSR3_BCON	0x0001		/* byte control */
#define CSR3_ACON	0x0002		/* ALE control */
#define CSR3_BSWP	0x0004		/* byte swap (1=big endian) */
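
/* The "C" (clear on write) bits above are acknowledged by writing a 1 back
 * to them, which is what the code below does, e.g. in lance_open():
 *
 *	DREG = CSR0_IDON;	// clear "initialization done"
 *
 * and in the interrupt handler, where the pending TINT/RINT/error bits read
 * from CSR0 are written back to acknowledge them.
 */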


/***************************** Prototypes *****************************/

static unsigned long lance_probe1( struct net_device *dev, struct lance_addr
                                   *init_rec );
static int lance_open( struct net_device *dev );
static void lance_init_ring( struct net_device *dev );
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static irqreturn_t lance_interrupt( int irq, void *dev_id );
static int lance_rx( struct net_device *dev );
static int lance_close( struct net_device *dev );
static void set_multicast_list( struct net_device *dev );
static int lance_set_mac_address( struct net_device *dev, void *addr );
static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue);

/************************* End of Prototypes **************************/



static void *slow_memcpy( void *dst, const void *src, size_t len )

{	char *cto = dst;
	const char *cfrom = src;

	while( len-- ) {
		*cto++ = *cfrom++;
		MFPDELAY();
	}
	return dst;
}


struct net_device * __init atarilance_probe(int unit)
{
	int i;
	static int found;
	struct net_device *dev;
	int err = -ENODEV;

	if (!MACH_IS_ATARI || found)
		/* Assume there's only one board possible... That seems true, since
		 * the Riebl/PAM board's address cannot be changed. */
		return ERR_PTR(-ENODEV);

	dev = alloc_etherdev(sizeof(struct lance_private));
	if (!dev)
		return ERR_PTR(-ENOMEM);
	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	}

	for( i = 0; i < N_LANCE_ADDR; ++i ) {
		if (lance_probe1( dev, &lance_addr_list[i] )) {
			found = 1;
			err = register_netdev(dev);
			if (!err)
				return dev;
			free_irq(dev->irq, dev);
			break;
		}
	}
	free_netdev(dev);
	return ERR_PTR(err);
}


/* Derived from hwreg_present() in atari/config.c: */
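
/* Summary of the asm below (an interpretation, following hwreg_present()):
 * the m68k bus-error vector (vbr[2]) is temporarily pointed at the local
 * label Lberr, then a byte or word read of *regp is attempted and, if
 * writeflag is set, a write test follows (the location is briefly cleared
 * and the original value written back).  If any access causes a bus error,
 * control ends up at Lberr with ret still 0; otherwise ret is set to 1.
 * The saved vector and interrupt state are restored before returning.
 */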

static noinline int __init addr_accessible(volatile void *regp, int wordflag,
					   int writeflag)
{
	int		ret;
	unsigned long	flags;
	long	*vbr, save_berr;

	local_irq_save(flags);

	__asm__ __volatile__ ( "movec	%/vbr,%0" : "=r" (vbr) : );
	save_berr = vbr[2];

	__asm__ __volatile__
	(	"movel	%/sp,%/d1\n\t"
		"movel	#Lberr,%2@\n\t"
		"moveq	#0,%0\n\t"
		"tstl   %3\n\t"
		"bne	1f\n\t"
		"moveb	%1@,%/d0\n\t"
		"nop	\n\t"
		"bra	2f\n"
"1:		 movew	%1@,%/d0\n\t"
		"nop	\n"
"2:		 tstl   %4\n\t"
		"beq	2f\n\t"
		"tstl	%3\n\t"
		"bne	1f\n\t"
		"clrb	%1@\n\t"
		"nop	\n\t"
		"moveb	%/d0,%1@\n\t"
		"nop	\n\t"
		"bra	2f\n"
"1:		 clrw	%1@\n\t"
		"nop	\n\t"
		"movew	%/d0,%1@\n\t"
		"nop	\n"
"2:		 moveq	#1,%0\n"
"Lberr:	 movel	%/d1,%/sp"
		: "=&d" (ret)
		: "a" (regp), "a" (&vbr[2]), "rm" (wordflag), "rm" (writeflag)
		: "d0", "d1", "memory"
	);

	vbr[2] = save_berr;
	local_irq_restore(flags);

	return ret;
}

static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_stop		= lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_set_mac_address	= lance_set_mac_address,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_validate_addr	= eth_validate_addr,
};

static unsigned long __init lance_probe1( struct net_device *dev,
					   struct lance_addr *init_rec )
{
	volatile unsigned short *memaddr =
		(volatile unsigned short *)init_rec->memaddr;
	volatile unsigned short *ioaddr =
		(volatile unsigned short *)init_rec->ioaddr;
	struct lance_private	*lp;
	struct lance_ioreg		*IO;
	int 					i;
	static int 				did_version;
	unsigned short			save1, save2;

	PROBE_PRINT(( "Probing for Lance card at mem %#lx io %#lx\n",
				  (long)memaddr, (long)ioaddr ));

	/* Test whether memory is readable and writable */
	PROBE_PRINT(( "lance_probe1: testing memory to be accessible\n" ));
	if (!addr_accessible( memaddr, 1, 1 )) goto probe_fail;

	/* Written values should come back... */
	PROBE_PRINT(( "lance_probe1: testing memory to be writable (1)\n" ));
	save1 = *memaddr;
	*memaddr = 0x0001;
	if (*memaddr != 0x0001) goto probe_fail;
	PROBE_PRINT(( "lance_probe1: testing memory to be writable (2)\n" ));
	*memaddr = 0x0000;
	if (*memaddr != 0x0000) goto probe_fail;
	*memaddr = save1;

	/* First port should be readable and writable */
	PROBE_PRINT(( "lance_probe1: testing ioport to be accessible\n" ));
	if (!addr_accessible( ioaddr, 1, 1 )) goto probe_fail;

	/* and written values should be readable */
	PROBE_PRINT(( "lance_probe1: testing ioport to be writable\n" ));
	save2 = ioaddr[1];
	ioaddr[1] = 0x0001;
	if (ioaddr[1] != 0x0001) goto probe_fail;

	/* The CSR0_INIT bit should not be readable */
	PROBE_PRINT(( "lance_probe1: testing CSR0 register function (1)\n" ));
	save1 = ioaddr[0];
	ioaddr[1] = CSR0;
	ioaddr[0] = CSR0_INIT | CSR0_STOP;
	if (ioaddr[0] != CSR0_STOP) {
		ioaddr[0] = save1;
		ioaddr[1] = save2;
		goto probe_fail;
	}
	PROBE_PRINT(( "lance_probe1: testing CSR0 register function (2)\n" ));
	ioaddr[0] = CSR0_STOP;
	if (ioaddr[0] != CSR0_STOP) {
		ioaddr[0] = save1;
		ioaddr[1] = save2;
		goto probe_fail;
	}

	/* Now ok... */
	PROBE_PRINT(( "lance_probe1: Lance card detected\n" ));
	goto probe_ok;

  probe_fail:
	return 0;

  probe_ok:
	lp = netdev_priv(dev);
	MEM = (struct lance_memory *)memaddr;
	IO = lp->iobase = (struct lance_ioreg *)ioaddr;
	dev->base_addr = (unsigned long)ioaddr; /* informational only */
	lp->memcpy_f = init_rec->slow_flag ? slow_memcpy : memcpy;

	REGA( CSR0 ) = CSR0_STOP;

	/* Now test for type: If the eeprom I/O port is readable, it is a
	 * PAM card */
	if (addr_accessible( &(IO->eeprom), 0, 0 )) {
		/* Switch back to RAM */
		i = IO->mem;
		lp->cardtype = PAM_CARD;
	}
	else if (*RIEBL_MAGIC_ADDR == RIEBL_MAGIC) {
		lp->cardtype = NEW_RIEBL;
	}
	else
		lp->cardtype = OLD_RIEBL;

	if (lp->cardtype == PAM_CARD ||
		memaddr == (unsigned short *)0xffe00000) {
		/* PAM's card and Riebl on ST use level 5 autovector */
		if (request_irq(IRQ_AUTO_5, lance_interrupt, 0,
				"PAM,Riebl-ST Ethernet", dev)) {
			printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
			return 0;
		}
		dev->irq = IRQ_AUTO_5;
	}
	else {
		/* For VME-RieblCards, request a free VME int */
		unsigned int irq = atari_register_vme_int();
		if (!irq) {
			printk( "Lance: request for VME interrupt failed\n" );
			return 0;
		}
		if (request_irq(irq, lance_interrupt, 0, "Riebl-VME Ethernet",
				dev)) {
			printk( "Lance: request for irq %u failed\n", irq );
			return 0;
		}
		dev->irq = irq;
	}

	printk("%s: %s at io %#lx, mem %#lx, irq %d%s, hwaddr ",
		   dev->name, lance_names[lp->cardtype],
		   (unsigned long)ioaddr,
		   (unsigned long)memaddr,
		   dev->irq,
		   init_rec->slow_flag ? " (slow memcpy)" : "" );

	/* Get the ethernet address */
	switch( lp->cardtype ) {
	  case OLD_RIEBL:
		/* No ethernet address! (Set some default address) */
		memcpy(dev->dev_addr, OldRieblDefHwaddr, ETH_ALEN);
		break;
	  case NEW_RIEBL:
		lp->memcpy_f(dev->dev_addr, RIEBL_HWADDR_ADDR, ETH_ALEN);
		break;
	  case PAM_CARD:
		i = IO->eeprom;
		for( i = 0; i < 6; ++i )
			dev->dev_addr[i] =
				((((unsigned short *)MEM)[i*2] & 0x0f) << 4) |
				((((unsigned short *)MEM)[i*2+1] & 0x0f));
		i = IO->mem;
		break;
	}
	printk("%pM\n", dev->dev_addr);
	if (lp->cardtype == OLD_RIEBL) {
		printk( "%s: Warning: This is a default ethernet address!\n",
				dev->name );
		printk( "      Use \"ifconfig hw ether ...\" to set the address.\n" );
	}

	spin_lock_init(&lp->devlock);

	MEM->init.mode = 0x0000;		/* Disable Rx and Tx. */
	for( i = 0; i < 6; i++ )
		MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
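	/* Swap illustration: with dev_addr 00:11:22:33:44:55 the init block ends
	 * up holding 11 00 33 22 55 44, i.e. the two bytes within each 16-bit
	 * word are exchanged; presumably this matches the byte lanes the chip
	 * sees for the word-wide shared memory in the big-endian (CSR3_BSWP)
	 * setup used here.
	 */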
	MEM->init.filter[0] = 0x00000000;
	MEM->init.filter[1] = 0x00000000;
	MEM->init.rx_ring.adr_lo = offsetof( struct lance_memory, rx_head );
	MEM->init.rx_ring.adr_hi = 0;
	MEM->init.rx_ring.len    = RX_RING_LEN_BITS;
	MEM->init.tx_ring.adr_lo = offsetof( struct lance_memory, tx_head );
	MEM->init.tx_ring.adr_hi = 0;
	MEM->init.tx_ring.len    = TX_RING_LEN_BITS;

	if (lp->cardtype == PAM_CARD)
		IO->ivec = IRQ_SOURCE_TO_VECTOR(dev->irq);
	else
		*RIEBL_IVEC_ADDR = IRQ_SOURCE_TO_VECTOR(dev->irq);

	if (did_version++ == 0)
		DPRINTK( 1, ( version ));

	dev->netdev_ops = &lance_netdev_ops;

	/* XXX MSch */
	dev->watchdog_timeo = TX_TIMEOUT;

	return 1;
}


static int lance_open( struct net_device *dev )
{
	struct lance_private *lp = netdev_priv(dev);
	struct lance_ioreg	 *IO = lp->iobase;
	int i;

	DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));

	lance_init_ring(dev);
	/* Re-initialize the LANCE, and start it when done. */

	REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
	REGA( CSR2 ) = 0;
	REGA( CSR1 ) = 0;
	REGA( CSR0 ) = CSR0_INIT;
	/* From now on, AREG is kept to point to CSR0 */

	i = 1000000;
	while (--i > 0)
		if (DREG & CSR0_IDON)
			break;
	if (i <= 0 || (DREG & CSR0_ERR)) {
		DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
					  dev->name, i, DREG ));
		DREG = CSR0_STOP;
		return -EIO;
	}
	DREG = CSR0_IDON;
	DREG = CSR0_STRT;
	DREG = CSR0_INEA;

	netif_start_queue (dev);

	DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));

	return 0;
}


/* Initialize the LANCE Rx and Tx rings. */

static void lance_init_ring( struct net_device *dev )
{
	struct lance_private *lp = netdev_priv(dev);
	int i;
	unsigned offset;

	lp->tx_full = 0;
	lp->cur_rx = lp->cur_tx = 0;
	lp->dirty_tx = 0;

	offset = offsetof( struct lance_memory, packet_area );

/* If the packet buffer at offset 'o' would conflict with the reserved area
 * of RieblCards, advance it */
#define	CHECK_OFFSET(o)														 \
	do {																	 \
		if (lp->cardtype == OLD_RIEBL || lp->cardtype == NEW_RIEBL) {		 \
			if (((o) < RIEBL_RSVD_START) ? (o)+PKT_BUF_SZ > RIEBL_RSVD_START \
										 : (o) < RIEBL_RSVD_END)			 \
				(o) = RIEBL_RSVD_END;										 \
		}																	 \
	} while(0)
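
/* Worked example for an old/new RieblCard: PKT_BUF_SZ is 1544 (0x608), so a
 * buffer that would start at, say, offset 0xe900 would run up to 0xef08 and
 * overlap the reserved area starting at 0xee70; CHECK_OFFSET() therefore
 * pushes it to RIEBL_RSVD_END (0xeec0), and the following buffers continue
 * from there.
 */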

	for( i = 0; i < TX_RING_SIZE; i++ ) {
		CHECK_OFFSET(offset);
		MEM->tx_head[i].base = offset;
		MEM->tx_head[i].flag = TMD1_OWN_HOST;
		MEM->tx_head[i].base_hi = 0;
		MEM->tx_head[i].length = 0;
		MEM->tx_head[i].misc = 0;
		offset += PKT_BUF_SZ;
	}

	for( i = 0; i < RX_RING_SIZE; i++ ) {
		CHECK_OFFSET(offset);
		MEM->rx_head[i].base = offset;
		MEM->rx_head[i].flag = TMD1_OWN_CHIP;
		MEM->rx_head[i].base_hi = 0;
		MEM->rx_head[i].buf_length = -PKT_BUF_SZ;
		MEM->rx_head[i].msg_length = 0;
		offset += PKT_BUF_SZ;
	}
}


/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */


static void lance_tx_timeout (struct net_device *dev, unsigned int txqueue)
{
	struct lance_private *lp = netdev_priv(dev);
	struct lance_ioreg	 *IO = lp->iobase;

	AREG = CSR0;
	DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
			  dev->name, DREG ));
	DREG = CSR0_STOP;
	/*
	 * Always set BSWP after a STOP as STOP puts it back into
	 * little endian mode.
	 */
	REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);
	dev->stats.tx_errors++;
#ifndef final_version
		{	int i;
			DPRINTK( 2, ( "Ring data: dirty_tx %d cur_tx %d%s cur_rx %d\n",
						  lp->dirty_tx, lp->cur_tx,
						  lp->tx_full ? " (full)" : "",
						  lp->cur_rx ));
			for( i = 0 ; i < RX_RING_SIZE; i++ )
				DPRINTK( 2, ( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
							  i, MEM->rx_head[i].base,
							  -MEM->rx_head[i].buf_length,
							  MEM->rx_head[i].msg_length ));
			for( i = 0 ; i < TX_RING_SIZE; i++ )
				DPRINTK( 2, ( "tx #%d: base=%04x len=%04x misc=%04x\n",
							  i, MEM->tx_head[i].base,
							  -MEM->tx_head[i].length,
							  MEM->tx_head[i].misc ));
		}
#endif
	/* XXX MSch: maybe purge/reinit ring here */
	/* lance_restart, essentially */
	lance_init_ring(dev);
	REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}

/* XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX */

static netdev_tx_t
lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	struct lance_ioreg	 *IO = lp->iobase;
	int entry, len;
	struct lance_tx_head *head;
	unsigned long flags;

	DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
				  dev->name, DREG ));

	/* The old LANCE chips don't automatically pad buffers to the min. size. */
	len = skb->len;
	if (len < ETH_ZLEN)
		len = ETH_ZLEN;
	/* The PAM card has a bug: it can only send packets with an even number
	 * of bytes! */
	else if (lp->cardtype == PAM_CARD && (len & 1))
		++len;

	if (len > skb->len) {
		if (skb_padto(skb, len))
			return NETDEV_TX_OK;
	}

	netif_stop_queue (dev);

	/* Fill in a Tx ring entry */
	if (lance_debug >= 3) {
		printk( "%s: TX pkt type 0x%04x from %pM to %pM"
				" data at 0x%08x len %d\n",
				dev->name, ((u_short *)skb->data)[6],
				&skb->data[6], skb->data,
				(int)skb->data, (int)skb->len );
	}

	/* We're not prepared for the int until the last flags are set/reset. And
	 * the int may happen already after setting the OWN_CHIP... */
	spin_lock_irqsave (&lp->devlock, flags);

	/* Mask to ring buffer boundary. */
	entry = lp->cur_tx & TX_RING_MOD_MASK;
	head  = &(MEM->tx_head[entry]);

	/* Caution: the write order is important here, set the "ownership" bits
	 * last.
	 */

	head->length = -len;
	head->misc = 0;
	lp->memcpy_f( PKTBUF_ADDR(head), (void *)skb->data, skb->len );
	head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
	dev->stats.tx_bytes += skb->len;
	dev_kfree_skb( skb );
	lp->cur_tx++;
	while( lp->cur_tx >= TX_RING_SIZE && lp->dirty_tx >= TX_RING_SIZE ) {
		lp->cur_tx -= TX_RING_SIZE;
		lp->dirty_tx -= TX_RING_SIZE;
	}

	/* Trigger an immediate send poll. */
	DREG = CSR0_INEA | CSR0_TDMD;

	if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
		TMD1_OWN_HOST)
		netif_start_queue (dev);
	else
		lp->tx_full = 1;
	spin_unlock_irqrestore (&lp->devlock, flags);

	return NETDEV_TX_OK;
}

/* The LANCE interrupt handler. */

static irqreturn_t lance_interrupt( int irq, void *dev_id )
{
	struct net_device *dev = dev_id;
	struct lance_private *lp;
	struct lance_ioreg	 *IO;
	int csr0, boguscnt = 10;
	int handled = 0;

	if (dev == NULL) {
		DPRINTK( 1, ( "lance_interrupt(): interrupt for unknown device.\n" ));
		return IRQ_NONE;
	}

	lp = netdev_priv(dev);
	IO = lp->iobase;
	spin_lock (&lp->devlock);

	AREG = CSR0;

	while( ((csr0 = DREG) & (CSR0_ERR | CSR0_TINT | CSR0_RINT)) &&
		   --boguscnt >= 0) {
		handled = 1;
		/* Acknowledge all of the current interrupt sources ASAP. */
		DREG = csr0 & ~(CSR0_INIT | CSR0_STRT | CSR0_STOP |
									CSR0_TDMD | CSR0_INEA);

		DPRINTK( 2, ( "%s: interrupt  csr0=%04x new csr=%04x.\n",
					  dev->name, csr0, DREG ));

		if (csr0 & CSR0_RINT)			/* Rx interrupt */
			lance_rx( dev );

		if (csr0 & CSR0_TINT) {			/* Tx-done interrupt */
			int dirty_tx = lp->dirty_tx;

			while( dirty_tx < lp->cur_tx) {
				int entry = dirty_tx & TX_RING_MOD_MASK;
				int status = MEM->tx_head[entry].flag;

				if (status & TMD1_OWN_CHIP)
					break;			/* It still hasn't been Txed */

				MEM->tx_head[entry].flag = 0;

				if (status & TMD1_ERR) {
					/* There was a major error; log it. */
					int err_status = MEM->tx_head[entry].misc;
					dev->stats.tx_errors++;
					if (err_status & TMD3_RTRY) dev->stats.tx_aborted_errors++;
					if (err_status & TMD3_LCAR) dev->stats.tx_carrier_errors++;
					if (err_status & TMD3_LCOL) dev->stats.tx_window_errors++;
					if (err_status & TMD3_UFLO) {
						/* Ackk!  On FIFO errors the Tx unit is turned off! */
						dev->stats.tx_fifo_errors++;
						/* Remove this verbosity later! */
						DPRINTK( 1, ( "%s: Tx FIFO error! Status %04x\n",
									  dev->name, csr0 ));
						/* Restart the chip. */
						DREG = CSR0_STRT;
					}
				} else {
					if (status & (TMD1_MORE | TMD1_ONE | TMD1_DEF))
						dev->stats.collisions++;
					dev->stats.tx_packets++;
				}

				/* XXX MSch: free skb?? */
				dirty_tx++;
			}

#ifndef final_version
			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
				DPRINTK( 0, ( "out-of-sync dirty pointer,"
							  " %d vs. %d, full=%ld.\n",
							  dirty_tx, lp->cur_tx, lp->tx_full ));
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (lp->tx_full && (netif_queue_stopped(dev)) &&
				dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
				/* The ring is no longer full, clear tbusy. */
				lp->tx_full = 0;
				netif_wake_queue (dev);
			}

			lp->dirty_tx = dirty_tx;
		}

		/* Log misc errors. */
		if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */
		if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */
		if (csr0 & CSR0_MERR) {
			DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
						  "status %04x.\n", dev->name, csr0 ));
			/* Restart the chip. */
			DREG = CSR0_STRT;
		}
	}

	/* Clear any other interrupt, and set interrupt enable. */
	DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
		   CSR0_IDON | CSR0_INEA;

	DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
				  dev->name, DREG ));

	spin_unlock (&lp->devlock);
	return IRQ_RETVAL(handled);
}


static int lance_rx( struct net_device *dev )
{
	struct lance_private *lp = netdev_priv(dev);
	int entry = lp->cur_rx & RX_RING_MOD_MASK;
	int i;

	DPRINTK( 2, ( "%s: rx int, flag=%04x\n", dev->name,
				  MEM->rx_head[entry].flag ));

	/* If we own the next entry, it's a new packet. Send it up. */
	while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
		struct lance_rx_head *head = &(MEM->rx_head[entry]);
		int status = head->flag;

		if (status != (RMD1_ENP|RMD1_STP)) {		/* There was an error. */
			/* There is a tricky error noted by John Murphy,
			   <murf@perftech.com> to Russ Nelson: Even with full-sized
			   buffers it's possible for a jabber packet to use two
			   buffers, with only the last correctly noting the error. */
			if (status & RMD1_ENP)	/* Only count a general error at the */
				dev->stats.rx_errors++; /* end of a packet.*/
			if (status & RMD1_FRAM) dev->stats.rx_frame_errors++;
			if (status & RMD1_OFLO) dev->stats.rx_over_errors++;
			if (status & RMD1_CRC) dev->stats.rx_crc_errors++;
			if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++;
			head->flag &= (RMD1_ENP|RMD1_STP);
		} else {
			/* Malloc up new buffer, compatible with net-3. */
			short pkt_len = head->msg_length & 0xfff;
			struct sk_buff *skb;

			if (pkt_len < 60) {
				printk( "%s: Runt packet!\n", dev->name );
				dev->stats.rx_errors++;
			}
			else {
				skb = netdev_alloc_skb(dev, pkt_len + 2);
				if (skb == NULL) {
					for( i = 0; i < RX_RING_SIZE; i++ )
						if (MEM->rx_head[(entry+i) & RX_RING_MOD_MASK].flag &
							RMD1_OWN_CHIP)
							break;

					if (i > RX_RING_SIZE - 2) {
						dev->stats.rx_dropped++;
						head->flag |= RMD1_OWN_CHIP;
						lp->cur_rx++;
					}
					break;
				}

				if (lance_debug >= 3) {
					u_char *data = PKTBUF_ADDR(head);

					printk(KERN_DEBUG "%s: RX pkt type 0x%04x from %pM to %pM "
						   "data %8ph len %d\n",
						   dev->name, ((u_short *)data)[6],
						   &data[6], data, &data[15], pkt_len);
				}

				skb_reserve( skb, 2 );	/* 16 byte align */
				skb_put( skb, pkt_len );	/* Make room */
				lp->memcpy_f( skb->data, PKTBUF_ADDR(head), pkt_len );
				skb->protocol = eth_type_trans( skb, dev );
				netif_rx( skb );
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		}

		head->flag |= RMD1_OWN_CHIP;
		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
	}
	lp->cur_rx &= RX_RING_MOD_MASK;

	/* From lance.c (Donald Becker): */
	/* We should check that at least two ring entries are free.	 If not,
	   we should free one and mark stats->rx_dropped++. */

	return 0;
}


static int lance_close( struct net_device *dev )
{
	struct lance_private *lp = netdev_priv(dev);
	struct lance_ioreg	 *IO = lp->iobase;

	netif_stop_queue (dev);

	AREG = CSR0;

	DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
				  dev->name, DREG ));

	/* We stop the LANCE here -- it occasionally polls
	   memory if we don't. */
	DREG = CSR0_STOP;

	return 0;
}


/* Set or clear the multicast filter for this adaptor.
   num_addrs == -1		Promiscuous mode, receive all packets
   num_addrs == 0		Normal mode, clear multicast list
   num_addrs > 0		Multicast mode, receive normal and MC packets, and do
						best-effort filtering.
 */

static void set_multicast_list( struct net_device *dev )
{
	struct lance_private *lp = netdev_priv(dev);
	struct lance_ioreg	 *IO = lp->iobase;

	if (netif_running(dev))
		/* Only possible if board is already started */
		return;

	/* We take the simple way out and always enable promiscuous mode. */
	DREG = CSR0_STOP; /* Temporarily stop the lance. */

	if (dev->flags & IFF_PROMISC) {
		/* Log any net taps. */
		DPRINTK( 2, ( "%s: Promiscuous mode enabled.\n", dev->name ));
		REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
	} else {
		short multicast_table[4];
		int num_addrs = netdev_mc_count(dev);
		int i;
		/* We don't use the multicast table, but rely on upper-layer
		 * filtering. */
		memset( multicast_table, (num_addrs == 0) ? 0 : -1,
				sizeof(multicast_table) );
		for( i = 0; i < 4; i++ )
			REGA( CSR8+i ) = multicast_table[i];
		REGA( CSR15 ) = 0; /* Unset promiscuous mode */
	}

	/*
	 * Always set BSWP after a STOP as STOP puts it back into
	 * little endian mode.
	 */
	REGA( CSR3 ) = CSR3_BSWP | (lp->cardtype == PAM_CARD ? CSR3_ACON : 0);

	/* Resume normal operation and reset AREG to CSR0 */
	REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
}


/* This is needed for old RieblCards and possibly for new RieblCards */

static int lance_set_mac_address( struct net_device *dev, void *addr )
{
	struct lance_private *lp = netdev_priv(dev);
	struct sockaddr *saddr = addr;
	int i;

	if (lp->cardtype != OLD_RIEBL && lp->cardtype != NEW_RIEBL)
		return -EOPNOTSUPP;

	if (netif_running(dev)) {
		/* Only possible while card isn't started */
		DPRINTK( 1, ( "%s: hwaddr can be set only while card isn't open.\n",
					  dev->name ));
		return -EIO;
	}

	memcpy( dev->dev_addr, saddr->sa_data, dev->addr_len );
	for( i = 0; i < 6; i++ )
		MEM->init.hwaddr[i] = dev->dev_addr[i^1]; /* <- 16 bit swap! */
	lp->memcpy_f( RIEBL_HWADDR_ADDR, dev->dev_addr, 6 );
	/* set also the magic for future sessions */
	*RIEBL_MAGIC_ADDR = RIEBL_MAGIC;

	return 0;
}


#ifdef MODULE
static struct net_device *atarilance_dev;

static int __init atarilance_module_init(void)
{
	atarilance_dev = atarilance_probe(-1);
	return PTR_ERR_OR_ZERO(atarilance_dev);
}

static void __exit atarilance_module_exit(void)
{
	unregister_netdev(atarilance_dev);
	free_irq(atarilance_dev->irq, atarilance_dev);
	free_netdev(atarilance_dev);
}
module_init(atarilance_module_init);
module_exit(atarilance_module_exit);
#endif /* MODULE */


/*
 * Local variables:
 *  c-indent-level: 4
 *  tab-width: 4
 * End:
 */