/* sun3lance.c: Ethernet driver for SUN3 Lance chip */
/*

  Sun3 Lance ethernet driver, by Sam Creasey (sammy@users.qual.net).
  This driver is a part of the linux kernel, and is thus distributed
  under the GNU General Public License.

  The values used for LANCE_OBIO and LANCE_IRQ appear, empirically, to
  be the correct address and IRQ of the lance registers.  They have not
  been widely tested, however.  What we probably need is a "proper" way
  to search for a device in the sun3's PROM, but, alas, linux has no
  such thing.

  This driver is largely based on atarilance.c, by Roman Hodek.  Other
  sources of inspiration were the NetBSD sun3 am7990 driver, and the
  linux sparc lance driver (sunlance.c).

  More assumptions are made throughout this driver; it almost certainly
  still needs work, but it does work at least for RARP/BOOTP and for
  mounting the root NFS filesystem.

*/

static const char version[] =
"sun3lance.c: v1.2 1/12/2001  Sam Creasey (sammy@sammy.net)\n";

#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/bitops.h>
#include <linux/pgtable.h>

#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dvma.h>
#include <asm/idprom.h>
#include <asm/machines.h>

#ifdef CONFIG_SUN3
#include <asm/sun3mmu.h>
#else
#include <asm/sun3xprom.h>
#endif

/* sun3/60 addr/irq for the lance chip.  If your sun is different,
   change this. */
#define LANCE_OBIO 0x120000
#define LANCE_IRQ IRQ_AUTO_3

/* Debug level:
 *  0 = silent, print only serious errors
 *  1 = normal, print error messages
 *  2 = debug, print debug info
 *  3 = debug, print even more debug info (packet data)
 */

#define	LANCE_DEBUG	0

#ifdef LANCE_DEBUG
static int lance_debug = LANCE_DEBUG;
#else
static int lance_debug = 1;
#endif
module_param(lance_debug, int, 0);
MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)");
MODULE_LICENSE("GPL");

#define	DPRINTK(n,a) \
	do {  \
		if (lance_debug >= n)  \
			printk a; \
	} while( 0 )


/* The shared memory block is allocated with 64KB alignment; within it we
   use 8 TX buffers and 32 RX buffers.  These counts are expressed below
   as log2. */

#define TX_LOG_RING_SIZE			3
#define RX_LOG_RING_SIZE			5

/* These are the derived values */

#define TX_RING_SIZE			(1 << TX_LOG_RING_SIZE)
#define TX_RING_LEN_BITS		(TX_LOG_RING_SIZE << 5)
#define	TX_RING_MOD_MASK		(TX_RING_SIZE - 1)

#define RX_RING_SIZE			(1 << RX_LOG_RING_SIZE)
#define RX_RING_LEN_BITS		(RX_LOG_RING_SIZE << 5)
#define	RX_RING_MOD_MASK		(RX_RING_SIZE - 1)
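
/* For illustration (derived directly from the macros above): with
 * TX_LOG_RING_SIZE == 3 and RX_LOG_RING_SIZE == 5 we get TX_RING_SIZE == 8,
 * TX_RING_MOD_MASK == 0x07, RX_RING_SIZE == 32 and RX_RING_MOD_MASK == 0x1f,
 * so ring indices are advanced below with expressions like
 *	lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK;
 */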

/* Definitions for packet buffer access: */
#define PKT_BUF_SZ		1544

/* Get the address of a packet buffer corresponding to a given buffer head */
#define	PKTBUF_ADDR(head)	(void *)((unsigned long)(MEM) | (head)->base)
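/* Note: this only works because MEM is allocated with 64KB alignment
 * (dvma_malloc_align(..., 0x10000) in lance_probe() below), so OR-ing the
 * low 16 address bits kept in head->base onto MEM reconstructs the
 * buffer's virtual address.
 */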


/* The LANCE Rx and Tx ring descriptors. */
struct lance_rx_head {
	unsigned short	base;		/* Low word of base addr */
	volatile unsigned char	flag;
	unsigned char  base_hi;	/* High word of base addr (unused) */
	short buf_length;	/* This length is 2s complement! */
	volatile short msg_length;	/* This length is "normal". */
};

struct lance_tx_head {
	unsigned short base;		/* Low word of base addr */
	volatile unsigned char	flag;
	unsigned char base_hi;	/* High word of base addr (unused) */
	short length;		/* Length is 2s complement! */
	volatile short misc;
};

/* The LANCE initialization block, described in databook. */
struct lance_init_block {
	unsigned short	mode;		/* Pre-set mode */
	unsigned char	hwaddr[6];	/* Physical ethernet address */
	unsigned int    filter[2];	/* Multicast filter (unused). */
	/* Receive and transmit ring base, along with length bits. */
	unsigned short rdra;
	unsigned short rlen;
	unsigned short tdra;
	unsigned short tlen;
	unsigned short pad[4]; /* is this needed? */
};

/* The whole layout of the Lance shared memory */
struct lance_memory {
	struct lance_init_block	init;
	struct lance_tx_head	tx_head[TX_RING_SIZE];
	struct lance_rx_head	rx_head[RX_RING_SIZE];
	char   rx_data[RX_RING_SIZE][PKT_BUF_SZ];
	char   tx_data[TX_RING_SIZE][PKT_BUF_SZ];
};
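
/* Rough size check (illustrative only): 32 bytes of init block, 8 * 8 bytes
 * of TX descriptors, 32 * 8 bytes of RX descriptors and (8 + 32) * 1544
 * bytes of packet buffers come to a little under 64KB, which is why the
 * whole structure is handed out as a single 64KB-aligned DVMA block in
 * lance_probe().
 */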

/* The driver's private device structure */

struct lance_private {
	volatile unsigned short	*iobase;
	struct lance_memory	*mem;
	int new_rx, new_tx;	/* The next free ring entry */
	int old_tx, old_rx;     /* ring entry to be processed */
/* These two must be longs for set_bit() */
	long	    tx_full;
	long	    lock;
};

/* I/O register access macros */

#define	MEM	lp->mem
#define	DREG	lp->iobase[0]
#define	AREG	lp->iobase[1]
#define	REGA(a)	(*( AREG = (a), &DREG ))
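
/* The LANCE is reached through two 16-bit ports: AREG is the register
 * address port (RAP) and DREG the register data port (RDP).  REGA(a)
 * selects CSR 'a' and then accesses it, e.g.
 *
 *	REGA(CSR0) = CSR0_STOP;		(write CSR0)
 *	csr0 = REGA(CSR0);		(read CSR0)
 *
 * Afterwards AREG is left pointing at that CSR, which the code below
 * relies on when it pokes DREG directly.
 */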

/* Definitions for the Lance */

/* tx_head flags */
#define TMD1_ENP		0x01	/* end of packet */
#define TMD1_STP		0x02	/* start of packet */
#define TMD1_DEF		0x04	/* deferred */
#define TMD1_ONE		0x08	/* one retry needed */
#define TMD1_MORE		0x10	/* more than one retry needed */
#define TMD1_ERR		0x40	/* error summary */
#define TMD1_OWN 		0x80	/* ownership (set: chip owns) */

#define TMD1_OWN_CHIP	TMD1_OWN
#define TMD1_OWN_HOST	0

/* tx_head misc field */
#define TMD3_TDR		0x03FF	/* Time Domain Reflectometry counter */
#define TMD3_RTRY		0x0400	/* failed after 16 retries */
#define TMD3_LCAR		0x0800	/* carrier lost */
#define TMD3_LCOL		0x1000	/* late collision */
#define TMD3_UFLO		0x4000	/* underflow (late memory) */
#define TMD3_BUFF		0x8000	/* buffering error (no ENP) */

/* rx_head flags */
#define RMD1_ENP		0x01	/* end of packet */
#define RMD1_STP		0x02	/* start of packet */
#define RMD1_BUFF		0x04	/* buffer error */
#define RMD1_CRC		0x08	/* CRC error */
#define RMD1_OFLO		0x10	/* overflow */
#define RMD1_FRAM		0x20	/* framing error */
#define RMD1_ERR		0x40	/* error summary */
#define RMD1_OWN 		0x80	/* ownership (set: chip owns) */

#define RMD1_OWN_CHIP	RMD1_OWN
#define RMD1_OWN_HOST	0

/* register names */
#define CSR0	0		/* mode/status */
#define CSR1	1		/* init block addr (low) */
#define CSR2	2		/* init block addr (high) */
#define CSR3	3		/* misc */
#define CSR8	8	  	/* address filter */
#define CSR15	15		/* promiscuous mode */

/* CSR0 */
/* (R=readable, W=writeable, S=set on write, C=clear on write) */
#define CSR0_INIT	0x0001		/* initialize (RS) */
#define CSR0_STRT	0x0002		/* start (RS) */
#define CSR0_STOP	0x0004		/* stop (RS) */
#define CSR0_TDMD	0x0008		/* transmit demand (RS) */
#define CSR0_TXON	0x0010		/* transmitter on (R) */
#define CSR0_RXON	0x0020		/* receiver on (R) */
#define CSR0_INEA	0x0040		/* interrupt enable (RW) */
#define CSR0_INTR	0x0080		/* interrupt active (R) */
#define CSR0_IDON	0x0100		/* initialization done (RC) */
#define CSR0_TINT	0x0200		/* transmitter interrupt (RC) */
#define CSR0_RINT	0x0400		/* receiver interrupt (RC) */
#define CSR0_MERR	0x0800		/* memory error (RC) */
#define CSR0_MISS	0x1000		/* missed frame (RC) */
#define CSR0_CERR	0x2000		/* carrier error (no heartbeat :-) (RC) */
#define CSR0_BABL	0x4000		/* babble: tx-ed too many bits (RC) */
#define CSR0_ERR	0x8000		/* error (RC) */

/* CSR3 */
#define CSR3_BCON	0x0001		/* byte control */
#define CSR3_ACON	0x0002		/* ALE control */
#define CSR3_BSWP	0x0004		/* byte swap (1=big endian) */
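
/* Note: stopping the chip (CSR0_STOP) resets CSR3 to its little-endian
 * default, which is why the code below re-writes CSR3_BSWP after every
 * STOP.
 */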

/***************************** Prototypes *****************************/

static int lance_probe( struct net_device *dev);
static int lance_open( struct net_device *dev );
static void lance_init_ring( struct net_device *dev );
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev);
static irqreturn_t lance_interrupt( int irq, void *dev_id);
static int lance_rx( struct net_device *dev );
static int lance_close( struct net_device *dev );
static void set_multicast_list( struct net_device *dev );

/************************* End of Prototypes **************************/

struct net_device * __init sun3lance_probe(int unit)
{
	struct net_device *dev;
	static int found;
	int err = -ENODEV;

	if (!MACH_IS_SUN3 && !MACH_IS_SUN3X)
		return ERR_PTR(-ENODEV);

	/* check that this machine has an onboard lance */
	switch(idprom->id_machtype) {
	case SM_SUN3|SM_3_50:
	case SM_SUN3|SM_3_60:
	case SM_SUN3X|SM_3_80:
		/* these machines have lance */
		break;

	default:
		return ERR_PTR(-ENODEV);
	}

	if (found)
		return ERR_PTR(-ENODEV);

	dev = alloc_etherdev(sizeof(struct lance_private));
	if (!dev)
		return ERR_PTR(-ENOMEM);
	if (unit >= 0) {
		sprintf(dev->name, "eth%d", unit);
		netdev_boot_setup_check(dev);
	}

	if (!lance_probe(dev))
		goto out;

	err = register_netdev(dev);
	if (err)
		goto out1;
	found = 1;
	return dev;

out1:
#ifdef CONFIG_SUN3
	iounmap((void __iomem *)dev->base_addr);
#endif
out:
	free_netdev(dev);
	return ERR_PTR(err);
}

static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_stop		= lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_set_mac_address	= NULL,
	.ndo_validate_addr	= eth_validate_addr,
};

static int __init lance_probe( struct net_device *dev)
{
	unsigned long ioaddr;

	struct lance_private	*lp;
	int 			i;
	static int 		did_version;
	volatile unsigned short *ioaddr_probe;
	unsigned short tmp1, tmp2;

#ifdef CONFIG_SUN3
	ioaddr = (unsigned long)ioremap(LANCE_OBIO, PAGE_SIZE);
	if (!ioaddr)
		return 0;
#else
	ioaddr = SUN3X_LANCE;
#endif

	/* test to see if there's really a lance here */
	/* (CSR0_INIT shouldn't be readable: a real lance reads back just
	   CSR0_STOP after the write below) */

	ioaddr_probe = (volatile unsigned short *)ioaddr;
	tmp1 = ioaddr_probe[0];
	tmp2 = ioaddr_probe[1];

	ioaddr_probe[1] = CSR0;
	ioaddr_probe[0] = CSR0_INIT | CSR0_STOP;

	if(ioaddr_probe[0] != CSR0_STOP) {
		ioaddr_probe[0] = tmp1;
		ioaddr_probe[1] = tmp2;

#ifdef CONFIG_SUN3
		iounmap((void __iomem *)ioaddr);
#endif
		return 0;
	}

	lp = netdev_priv(dev);

	/* XXX - leak? */
	MEM = dvma_malloc_align(sizeof(struct lance_memory), 0x10000);
	if (MEM == NULL) {
#ifdef CONFIG_SUN3
		iounmap((void __iomem *)ioaddr);
#endif
		printk(KERN_WARNING "SUN3 Lance couldn't allocate DVMA memory\n");
		return 0;
	}

	lp->iobase = (volatile unsigned short *)ioaddr;
	dev->base_addr = (unsigned long)ioaddr; /* informational only */

	REGA(CSR0) = CSR0_STOP;

	if (request_irq(LANCE_IRQ, lance_interrupt, 0, "SUN3 Lance", dev) < 0) {
#ifdef CONFIG_SUN3
		iounmap((void __iomem *)ioaddr);
#endif
		dvma_free((void *)MEM);
		printk(KERN_WARNING "SUN3 Lance unable to allocate IRQ\n");
		return 0;
	}
	dev->irq = (unsigned short)LANCE_IRQ;


	printk("%s: SUN3 Lance at io %#lx, mem %#lx, irq %d, hwaddr ",
		   dev->name,
		   (unsigned long)ioaddr,
		   (unsigned long)MEM,
		   dev->irq);

	/* copy in the ethernet address from the prom */
	for(i = 0; i < 6 ; i++)
	     dev->dev_addr[i] = idprom->id_ethaddr[i];

	/* tell the card its ethernet address, bytes swapped */
	MEM->init.hwaddr[0] = dev->dev_addr[1];
	MEM->init.hwaddr[1] = dev->dev_addr[0];
	MEM->init.hwaddr[2] = dev->dev_addr[3];
	MEM->init.hwaddr[3] = dev->dev_addr[2];
	MEM->init.hwaddr[4] = dev->dev_addr[5];
	MEM->init.hwaddr[5] = dev->dev_addr[4];

	printk("%pM\n", dev->dev_addr);

	MEM->init.mode = 0x0000;
	MEM->init.filter[0] = 0x00000000;
	MEM->init.filter[1] = 0x00000000;
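	/* rdra/tdra hold the low 16 bits of the ring addresses; rlen/tlen
	 * pack the ring length (as log2, in bits 13-15) together with the
	 * upper bits of the 24-bit DVMA ring address.
	 */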
	MEM->init.rdra = dvma_vtob(MEM->rx_head);
	MEM->init.rlen    = (RX_LOG_RING_SIZE << 13) |
		(dvma_vtob(MEM->rx_head) >> 16);
	MEM->init.tdra = dvma_vtob(MEM->tx_head);
	MEM->init.tlen    = (TX_LOG_RING_SIZE << 13) |
		(dvma_vtob(MEM->tx_head) >> 16);

	DPRINTK(2, ("initaddr: %08lx rx_ring: %08lx tx_ring: %08lx\n",
	       dvma_vtob(&(MEM->init)), dvma_vtob(MEM->rx_head),
	       (dvma_vtob(MEM->tx_head))));

	if (did_version++ == 0)
		printk( version );

	dev->netdev_ops = &lance_netdev_ops;
//	KLUDGE -- REMOVE ME
	set_bit(__LINK_STATE_PRESENT, &dev->state);


	return 1;
}

static int lance_open( struct net_device *dev )
{
	struct lance_private *lp = netdev_priv(dev);
	int i;

	DPRINTK( 2, ( "%s: lance_open()\n", dev->name ));

	REGA(CSR0) = CSR0_STOP;

	lance_init_ring(dev);

	/* From now on, AREG is kept to point to CSR0 */
	REGA(CSR0) = CSR0_INIT;

	i = 1000000;
	while (--i > 0)
		if (DREG & CSR0_IDON)
			break;
	if (i <= 0 || (DREG & CSR0_ERR)) {
		DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
					  dev->name, i, DREG ));
		DREG = CSR0_STOP;
		return -EIO;
	}

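	/* writing CSR0_IDON back acknowledges (clears) the init-done bit,
	 * while CSR0_STRT starts the chip and CSR0_INEA enables interrupts
	 */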
	DREG = CSR0_IDON | CSR0_STRT | CSR0_INEA;

	netif_start_queue(dev);

	DPRINTK( 2, ( "%s: LANCE is open, csr0 %04x\n", dev->name, DREG ));

	return 0;
}


/* Initialize the LANCE Rx and Tx rings. */

static void lance_init_ring( struct net_device *dev )
{
	struct lance_private *lp = netdev_priv(dev);
	int i;

	lp->lock = 0;
	lp->tx_full = 0;
	lp->new_rx = lp->new_tx = 0;
	lp->old_rx = lp->old_tx = 0;

	for( i = 0; i < TX_RING_SIZE; i++ ) {
		MEM->tx_head[i].base = dvma_vtob(MEM->tx_data[i]);
		MEM->tx_head[i].flag = 0;
		MEM->tx_head[i].base_hi =
			(dvma_vtob(MEM->tx_data[i])) >>16;
		MEM->tx_head[i].length = 0;
		MEM->tx_head[i].misc = 0;
	}

	for( i = 0; i < RX_RING_SIZE; i++ ) {
		MEM->rx_head[i].base = dvma_vtob(MEM->rx_data[i]);
		MEM->rx_head[i].flag = RMD1_OWN_CHIP;
		MEM->rx_head[i].base_hi =
			(dvma_vtob(MEM->rx_data[i])) >> 16;
		MEM->rx_head[i].buf_length = -PKT_BUF_SZ | 0xf000;
		MEM->rx_head[i].msg_length = 0;
	}

	/* tell the card its ethernet address, bytes swapped */
	MEM->init.hwaddr[0] = dev->dev_addr[1];
	MEM->init.hwaddr[1] = dev->dev_addr[0];
	MEM->init.hwaddr[2] = dev->dev_addr[3];
	MEM->init.hwaddr[3] = dev->dev_addr[2];
	MEM->init.hwaddr[4] = dev->dev_addr[5];
	MEM->init.hwaddr[5] = dev->dev_addr[4];

	MEM->init.mode = 0x0000;
	MEM->init.filter[0] = 0x00000000;
	MEM->init.filter[1] = 0x00000000;
	MEM->init.rdra = dvma_vtob(MEM->rx_head);
	MEM->init.rlen    = (RX_LOG_RING_SIZE << 13) |
		(dvma_vtob(MEM->rx_head) >> 16);
	MEM->init.tdra = dvma_vtob(MEM->tx_head);
	MEM->init.tlen    = (TX_LOG_RING_SIZE << 13) |
		(dvma_vtob(MEM->tx_head) >> 16);


	/* tell the lance the address of its init block */
	REGA(CSR1) = dvma_vtob(&(MEM->init));
	REGA(CSR2) = dvma_vtob(&(MEM->init)) >> 16;

#ifdef CONFIG_SUN3X
	REGA(CSR3) = CSR3_BSWP | CSR3_ACON | CSR3_BCON;
#else
	REGA(CSR3) = CSR3_BSWP;
#endif

}


static netdev_tx_t
lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int entry, len;
	struct lance_tx_head *head;
	unsigned long flags;

	DPRINTK( 1, ( "%s: transmit start.\n",
		      dev->name));

	/* Transmitter timeout, serious problems. */
	if (netif_queue_stopped(dev)) {
		int tickssofar = jiffies - dev_trans_start(dev);
		if (tickssofar < HZ/5)
			return NETDEV_TX_BUSY;

		DPRINTK( 1, ( "%s: transmit timed out, status %04x, resetting.\n",
					  dev->name, DREG ));
		DREG = CSR0_STOP;
		/*
		 * Always set BSWP after a STOP as STOP puts it back into
		 * little endian mode.
		 */
		REGA(CSR3) = CSR3_BSWP;
		dev->stats.tx_errors++;

		if(lance_debug >= 2) {
			int i;
			printk("Ring data: old_tx %d new_tx %d%s new_rx %d\n",
			       lp->old_tx, lp->new_tx,
			       lp->tx_full ? " (full)" : "",
			       lp->new_rx );
			for( i = 0 ; i < RX_RING_SIZE; i++ )
				printk( "rx #%d: base=%04x blen=%04x mlen=%04x\n",
					i, MEM->rx_head[i].base,
					-MEM->rx_head[i].buf_length,
					MEM->rx_head[i].msg_length);
			for( i = 0 ; i < TX_RING_SIZE; i++ )
				printk("tx #%d: base=%04x len=%04x misc=%04x\n",
				       i, MEM->tx_head[i].base,
				       -MEM->tx_head[i].length,
				       MEM->tx_head[i].misc );
		}

		lance_init_ring(dev);
		REGA( CSR0 ) = CSR0_INEA | CSR0_INIT | CSR0_STRT;

		netif_start_queue(dev);

		return NETDEV_TX_OK;
	}


	/* Block a timer-based transmit from overlapping with us by stopping
	   the queue for a bit.  (This could better be done with
	   atomic_swap(1, dev->tbusy), but set_bit() works as well.) */

	netif_stop_queue(dev);

	if (test_and_set_bit( 0, (void*)&lp->lock ) != 0) {
		printk( "%s: tx queue lock!\n", dev->name);
		/* leave the queue stopped */
		return NETDEV_TX_BUSY;
	}

	AREG = CSR0;
	DPRINTK( 2, ( "%s: lance_start_xmit() called, csr0 %4.4x.\n",
				  dev->name, DREG ));

#ifdef CONFIG_SUN3X
	/* this weirdness doesn't appear on sun3... */
	if(!(DREG & CSR0_INIT)) {
		DPRINTK( 1, ("INIT not set, reinitializing...\n"));
		REGA( CSR0 ) = CSR0_STOP;
		lance_init_ring(dev);
		REGA( CSR0 ) = CSR0_INIT | CSR0_STRT;
	}
#endif

	/* Fill in a Tx ring entry */
#if 0
	if (lance_debug >= 2) {
		printk( "%s: TX pkt %d type 0x%04x"
			" from %s to %s"
			" data at 0x%08x len %d\n",
			dev->name, lp->new_tx, ((u_short *)skb->data)[6],
			DEV_ADDR(&skb->data[6]), DEV_ADDR(skb->data),
			(int)skb->data, (int)skb->len );
	}
#endif
	/* We're not prepared for the interrupt until the last flags are
	 * set/reset, and the interrupt may fire as soon as OWN_CHIP is set... */
	local_irq_save(flags);

	/* Mask to ring buffer boundary. */
	entry = lp->new_tx;
	head  = &(MEM->tx_head[entry]);

	/* Caution: the write order is important here, set the "ownership" bits
	 * last.
	 */

	/* the sun3's lance needs its buffer padded to the minimum
	   size */
	len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;

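	/* the length field is a 12-bit 2s complement count; per the usual
	 * LANCE convention the upper four bits must be written as ones,
	 * hence the | 0xf000 (the same applies to buf_length in the RX
	 * descriptors above)
	 */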
//	head->length = -len;
	head->length = (-len) | 0xf000;
	head->misc = 0;

	skb_copy_from_linear_data(skb, PKTBUF_ADDR(head), skb->len);
	if (len != skb->len)
		memset(PKTBUF_ADDR(head) + skb->len, 0, len-skb->len);

	head->flag = TMD1_OWN_CHIP | TMD1_ENP | TMD1_STP;
	lp->new_tx = (lp->new_tx + 1) & TX_RING_MOD_MASK;
	dev->stats.tx_bytes += skb->len;

	/* Trigger an immediate send poll. */
	REGA(CSR0) = CSR0_INEA | CSR0_TDMD | CSR0_STRT;
	AREG = CSR0;
	DPRINTK( 2, ( "%s: lance_start_xmit() exiting, csr0 %4.4x.\n",
				  dev->name, DREG ));
	dev_kfree_skb(skb);

	lp->lock = 0;
	if ((MEM->tx_head[(entry+1) & TX_RING_MOD_MASK].flag & TMD1_OWN) ==
	    TMD1_OWN_HOST)
		netif_start_queue(dev);

	local_irq_restore(flags);

	return NETDEV_TX_OK;
}

/* The LANCE interrupt handler. */

static irqreturn_t lance_interrupt( int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp = netdev_priv(dev);
	int csr0;

 still_more:
	flush_cache_all();

	AREG = CSR0;
	csr0 = DREG;

	/* ack interrupts */
	DREG = csr0 & (CSR0_TINT | CSR0_RINT | CSR0_IDON);
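	/* (TINT, RINT and IDON are "clear on write" bits, so writing back
	 * whichever of them are set acknowledges them) */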

	/* clear errors */
	if(csr0 & CSR0_ERR)
		DREG = CSR0_BABL | CSR0_MERR | CSR0_CERR | CSR0_MISS;


	DPRINTK( 2, ( "%s: interrupt  csr0=%04x new csr=%04x.\n",
		      dev->name, csr0, DREG ));

	if (csr0 & CSR0_TINT) {			/* Tx-done interrupt */
		int old_tx = lp->old_tx;

//		if(lance_debug >= 3) {
//			int i;
//
//			printk("%s: tx int\n", dev->name);
//
//			for(i = 0; i < TX_RING_SIZE; i++)
//				printk("ring %d flag=%04x\n", i,
//				       MEM->tx_head[i].flag);
//		}

		while( old_tx != lp->new_tx) {
			struct lance_tx_head *head = &(MEM->tx_head[old_tx]);

			DPRINTK(3, ("on tx_ring %d\n", old_tx));

			if (head->flag & TMD1_OWN_CHIP)
				break; /* It still hasn't been Txed */

			if (head->flag & TMD1_ERR) {
				int status = head->misc;
				dev->stats.tx_errors++;
				if (status & TMD3_RTRY) dev->stats.tx_aborted_errors++;
				if (status & TMD3_LCAR) dev->stats.tx_carrier_errors++;
				if (status & TMD3_LCOL) dev->stats.tx_window_errors++;
				if (status & (TMD3_UFLO | TMD3_BUFF)) {
					dev->stats.tx_fifo_errors++;
					printk("%s: Tx FIFO error\n",
					       dev->name);
					REGA(CSR0) = CSR0_STOP;
					REGA(CSR3) = CSR3_BSWP;
					lance_init_ring(dev);
					REGA(CSR0) = CSR0_STRT | CSR0_INEA;
					return IRQ_HANDLED;
				}
			} else if(head->flag & (TMD1_ENP | TMD1_STP)) {

				head->flag &= ~(TMD1_ENP | TMD1_STP);
				if(head->flag & (TMD1_ONE | TMD1_MORE))
					dev->stats.collisions++;

				dev->stats.tx_packets++;
				DPRINTK(3, ("cleared tx ring %d\n", old_tx));
			}
			old_tx = (old_tx +1) & TX_RING_MOD_MASK;
		}

		lp->old_tx = old_tx;
	}


	if (netif_queue_stopped(dev)) {
		/* The ring is no longer full, clear tbusy. */
		netif_start_queue(dev);
		netif_wake_queue(dev);
	}

	if (csr0 & CSR0_RINT)			/* Rx interrupt */
		lance_rx( dev );

	/* Log misc errors. */
	if (csr0 & CSR0_BABL) dev->stats.tx_errors++; /* Tx babble. */
	if (csr0 & CSR0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. */
	if (csr0 & CSR0_MERR) {
		DPRINTK( 1, ( "%s: Bus master arbitration failure (?!?), "
			      "status %04x.\n", dev->name, csr0 ));
		/* Restart the chip. */
		REGA(CSR0) = CSR0_STOP;
		REGA(CSR3) = CSR3_BSWP;
		lance_init_ring(dev);
		REGA(CSR0) = CSR0_STRT | CSR0_INEA;
	}


	/* Clear any other interrupt, and set interrupt enable. */
//	DREG = CSR0_BABL | CSR0_CERR | CSR0_MISS | CSR0_MERR |
//		   CSR0_IDON | CSR0_INEA;

	REGA(CSR0) = CSR0_INEA;

	if(DREG & (CSR0_RINT | CSR0_TINT)) {
	     DPRINTK(2, ("restarting interrupt, csr0=%#04x\n", DREG));
	     goto still_more;
	}

	DPRINTK( 2, ( "%s: exiting interrupt, csr0=%#04x.\n",
				  dev->name, DREG ));
	return IRQ_HANDLED;
}

/* get packet, toss into skbuff */
static int lance_rx( struct net_device *dev )
{
	struct lance_private *lp = netdev_priv(dev);
	int entry = lp->new_rx;

	/* If we own the next entry, it's a new packet. Send it up. */
	while( (MEM->rx_head[entry].flag & RMD1_OWN) == RMD1_OWN_HOST ) {
		struct lance_rx_head *head = &(MEM->rx_head[entry]);
		int status = head->flag;

		if (status != (RMD1_ENP|RMD1_STP)) {  /* There was an error. */
			/* There is a tricky error noted by John Murphy,
			   <murf@perftech.com> to Russ Nelson: Even with
			   full-sized buffers it's possible for a jabber packet to use two
			   buffers, with only the last correctly noting the error. */
			if (status & RMD1_ENP)	/* Only count a general error at the */
				dev->stats.rx_errors++; /* end of a packet.*/
			if (status & RMD1_FRAM) dev->stats.rx_frame_errors++;
			if (status & RMD1_OFLO) dev->stats.rx_over_errors++;
			if (status & RMD1_CRC) dev->stats.rx_crc_errors++;
			if (status & RMD1_BUFF) dev->stats.rx_fifo_errors++;
			head->flag &= (RMD1_ENP|RMD1_STP);
		} else {
			/* Malloc up new buffer, compatible with net-3. */
//			short pkt_len = head->msg_length;// & 0xfff;
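			/* msg_length is only 12 bits wide and, per the usual
			   LANCE behaviour, includes the 4-byte FCS, so mask
			   it and strip the FCS here */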
			short pkt_len = (head->msg_length & 0xfff) - 4;
			struct sk_buff *skb;

			if (pkt_len < 60) {
				printk( "%s: Runt packet!\n", dev->name );
				dev->stats.rx_errors++;
			}
			else {
				skb = netdev_alloc_skb(dev, pkt_len + 2);
				if (skb == NULL) {
					dev->stats.rx_dropped++;
					head->msg_length = 0;
					head->flag |= RMD1_OWN_CHIP;
					lp->new_rx = (lp->new_rx+1) &
					     RX_RING_MOD_MASK;
					/* bail out: skb is NULL and must not
					   be touched below */
					break;
				}

#if 0
				if (lance_debug >= 3) {
					u_char *data = PKTBUF_ADDR(head);
					printk("%s: RX pkt %d type 0x%04x"
					       " from %pM to %pM",
					       dev->name, lp->new_tx, ((u_short *)data)[6],
					       &data[6], data);

					printk(" data %02x %02x %02x %02x %02x %02x %02x %02x "
					       "len %d at %08x\n",
					       data[15], data[16], data[17], data[18],
					       data[19], data[20], data[21], data[22],
					       pkt_len, data);
				}
#endif
				if (lance_debug >= 3) {
					u_char *data = PKTBUF_ADDR(head);
					printk( "%s: RX pkt %d type 0x%04x len %d\n ", dev->name, entry, ((u_short *)data)[6], pkt_len);
				}


				skb_reserve( skb, 2 );	/* 16 byte align */
				skb_put( skb, pkt_len );	/* Make room */
				skb_copy_to_linear_data(skb,
						 PKTBUF_ADDR(head),
						 pkt_len);

				skb->protocol = eth_type_trans( skb, dev );
				netif_rx( skb );
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += pkt_len;
			}
		}

//		head->buf_length = -PKT_BUF_SZ | 0xf000;
		head->msg_length = 0;
		head->flag = RMD1_OWN_CHIP;

		entry = lp->new_rx = (lp->new_rx +1) & RX_RING_MOD_MASK;
	}

	/* From lance.c (Donald Becker): */
	/* We should check that at least two ring entries are free.
	   If not, we should free one and mark stats->rx_dropped++. */

	return 0;
}


static int lance_close( struct net_device *dev )
{
	struct lance_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	AREG = CSR0;

	DPRINTK( 2, ( "%s: Shutting down ethercard, status was %2.2x.\n",
				  dev->name, DREG ));

	/* We stop the LANCE here -- it occasionally polls
	   memory if we don't. */
	DREG = CSR0_STOP;
	return 0;
}


/* Set or clear the multicast filter for this adaptor.
   num_addrs == -1		Promiscuous mode, receive all packets
   num_addrs == 0		Normal mode, clear multicast list
   num_addrs > 0		Multicast mode, receive normal and MC packets, and do
						best-effort filtering.
 */

/* completely untested on a sun3 */
static void set_multicast_list( struct net_device *dev )
{
	struct lance_private *lp = netdev_priv(dev);

	if(netif_queue_stopped(dev))
		/* Only possible if board is already started */
		return;

	/* We take the simple way out and always enable promiscuous mode. */
	DREG = CSR0_STOP; /* Temporarily stop the lance. */

	if (dev->flags & IFF_PROMISC) {
		/* Log any net taps. */
		DPRINTK( 3, ( "%s: Promiscuous mode enabled.\n", dev->name ));
		REGA( CSR15 ) = 0x8000; /* Set promiscuous mode */
	} else {
		short multicast_table[4];
		int num_addrs = netdev_mc_count(dev);
		int i;
		/* We don't use the multicast table, but rely on upper-layer
		 * filtering. */
		memset( multicast_table, (num_addrs == 0) ? 0 : -1,
				sizeof(multicast_table) );
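		/* CSR8-CSR11 form the 64-bit logical (multicast hash) address
		   filter: all ones accepts every multicast frame, all zeroes
		   rejects them all */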
		for( i = 0; i < 4; i++ )
			REGA( CSR8+i ) = multicast_table[i];
		REGA( CSR15 ) = 0; /* Unset promiscuous mode */
	}

	/*
	 * Always set BSWP after a STOP as STOP puts it back into
	 * little endian mode.
	 */
	REGA( CSR3 ) = CSR3_BSWP;

	/* Resume normal operation and reset AREG to CSR0 */
	REGA( CSR0 ) = CSR0_IDON | CSR0_INEA | CSR0_STRT;
}


#ifdef MODULE

static struct net_device *sun3lance_dev;

int __init init_module(void)
{
	sun3lance_dev = sun3lance_probe(-1);
	return PTR_ERR_OR_ZERO(sun3lance_dev);
}

void __exit cleanup_module(void)
{
	unregister_netdev(sun3lance_dev);
#ifdef CONFIG_SUN3
	iounmap((void __iomem *)sun3lance_dev->base_addr);
#endif
	free_netdev(sun3lance_dev);
}

#endif /* MODULE */
