xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/amd/a2065.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Amiga Linux/68k A2065 Ethernet Driver
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * (C) Copyright 1995-2003 by Geert Uytterhoeven <geert@linux-m68k.org>
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Fixes and tips by:
7*4882a593Smuzhiyun  *	- Janos Farkas (CHEXUM@sparta.banki.hu)
8*4882a593Smuzhiyun  *	- Jes Degn Soerensen (jds@kom.auc.dk)
9*4882a593Smuzhiyun  *	- Matt Domsch (Matt_Domsch@dell.com)
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  * ----------------------------------------------------------------------------
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  * This program is based on
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  *	ariadne.?:	Amiga Linux/68k Ariadne Ethernet Driver
16*4882a593Smuzhiyun  *			(C) Copyright 1995 by Geert Uytterhoeven,
17*4882a593Smuzhiyun  *                                            Peter De Schrijver
18*4882a593Smuzhiyun  *
19*4882a593Smuzhiyun  *	lance.c:	An AMD LANCE ethernet driver for linux.
20*4882a593Smuzhiyun  *			Written 1993-94 by Donald Becker.
21*4882a593Smuzhiyun  *
22*4882a593Smuzhiyun  *	Am79C960:	PCnet(tm)-ISA Single-Chip Ethernet Controller
23*4882a593Smuzhiyun  *			Advanced Micro Devices
24*4882a593Smuzhiyun  *			Publication #16907, Rev. B, Amendment/0, May 1994
25*4882a593Smuzhiyun  *
26*4882a593Smuzhiyun  * ----------------------------------------------------------------------------
27*4882a593Smuzhiyun  *
28*4882a593Smuzhiyun  * This file is subject to the terms and conditions of the GNU General Public
29*4882a593Smuzhiyun  * License.  See the file COPYING in the main directory of the Linux
30*4882a593Smuzhiyun  * distribution for more details.
31*4882a593Smuzhiyun  *
32*4882a593Smuzhiyun  * ----------------------------------------------------------------------------
33*4882a593Smuzhiyun  *
34*4882a593Smuzhiyun  * The A2065 is a Zorro-II board made by Commodore/Ameristar. It contains:
35*4882a593Smuzhiyun  *
36*4882a593Smuzhiyun  *	- an Am7990 Local Area Network Controller for Ethernet (LANCE) with
37*4882a593Smuzhiyun  *	  both 10BASE-2 (thin coax) and AUI (DB-15) connectors
38*4882a593Smuzhiyun  */
39*4882a593Smuzhiyun 
40*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun /*#define DEBUG*/
43*4882a593Smuzhiyun /*#define TEST_HITS*/
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun #include <linux/errno.h>
46*4882a593Smuzhiyun #include <linux/netdevice.h>
47*4882a593Smuzhiyun #include <linux/etherdevice.h>
48*4882a593Smuzhiyun #include <linux/module.h>
49*4882a593Smuzhiyun #include <linux/stddef.h>
50*4882a593Smuzhiyun #include <linux/kernel.h>
51*4882a593Smuzhiyun #include <linux/interrupt.h>
52*4882a593Smuzhiyun #include <linux/ioport.h>
53*4882a593Smuzhiyun #include <linux/skbuff.h>
54*4882a593Smuzhiyun #include <linux/string.h>
55*4882a593Smuzhiyun #include <linux/init.h>
56*4882a593Smuzhiyun #include <linux/crc32.h>
57*4882a593Smuzhiyun #include <linux/zorro.h>
58*4882a593Smuzhiyun #include <linux/bitops.h>
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun #include <asm/byteorder.h>
61*4882a593Smuzhiyun #include <asm/irq.h>
62*4882a593Smuzhiyun #include <asm/amigaints.h>
63*4882a593Smuzhiyun #include <asm/amigahw.h>
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun #include "a2065.h"
66*4882a593Smuzhiyun 
/* Transmit/Receive Ring Definitions */

/* log2 of the ring sizes; these values are programmed into the init
 * block's tx_len/rx_len fields (shifted left by 13), so the rings must
 * be powers of two.
 */
#define LANCE_LOG_TX_BUFFERS	(2)
#define LANCE_LOG_RX_BUFFERS	(4)

#define TX_RING_SIZE		(1 << LANCE_LOG_TX_BUFFERS)
#define RX_RING_SIZE		(1 << LANCE_LOG_RX_BUFFERS)

/* Masks for cheap modulo arithmetic when advancing ring indices */
#define TX_RING_MOD_MASK	(TX_RING_SIZE - 1)
#define RX_RING_MOD_MASK	(RX_RING_SIZE - 1)

/* Per-descriptor buffer size; large enough for a maximum Ethernet frame */
#define PKT_BUF_SIZE		(1544)
#define RX_BUFF_SIZE            PKT_BUF_SIZE
#define TX_BUFF_SIZE            PKT_BUF_SIZE
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun /* Layout of the Lance's RAM Buffer */
83*4882a593Smuzhiyun 
/* Layout of the LANCE's view of the board RAM: the initialization block
 * followed by the descriptor rings and the packet buffers.  The chip
 * reads this structure directly, so the layout is fixed by hardware.
 */
struct lance_init_block {
	unsigned short mode;		/* Pre-set mode (reg. 15) */
	unsigned char phys_addr[6];     /* Physical ethernet address */
	unsigned filter[2];		/* Multicast filter. */

	/* Receive and transmit ring base, along with extra bits. */
	unsigned short rx_ptr;		/* receive descriptor addr */
	unsigned short rx_len;		/* receive len and high addr */
	unsigned short tx_ptr;		/* transmit descriptor addr */
	unsigned short tx_len;		/* transmit len and high addr */

	/* The Tx and Rx ring entries must be aligned on 8-byte boundaries. */
	struct lance_rx_desc brx_ring[RX_RING_SIZE];
	struct lance_tx_desc btx_ring[TX_RING_SIZE];

	/* Packet data buffers, one per descriptor */
	char rx_buf[RX_RING_SIZE][RX_BUFF_SIZE];
	char tx_buf[TX_RING_SIZE][TX_BUFF_SIZE];
};
102*4882a593Smuzhiyun 
103*4882a593Smuzhiyun /* Private Device Data */
104*4882a593Smuzhiyun 
/* Private Device Data */

struct lance_private {
	char *name;
	volatile struct lance_regs *ll;			    /* chip registers (RAP/RDP) */
	volatile struct lance_init_block *init_block;	    /* Hosts view */
	volatile struct lance_init_block *lance_init_block; /* Lance view */

	/* Ring cursors: *_new is where the driver produces, *_old is where
	 * it reaps completed descriptors.
	 */
	int rx_new, tx_new;
	int rx_old, tx_old;

	int lance_log_rx_bufs, lance_log_tx_bufs;
	int rx_ring_mod_mask, tx_ring_mod_mask;

	int tpe;		      /* cable-selection is TPE */
	int auto_select;	      /* cable-selection by carrier */
	unsigned short busmaster_regval;	/* value loaded into CSR3 */

	/* Defers multicast-filter updates while Tx is in flight */
	struct timer_list         multicast_timer;
	struct net_device	  *dev;
};
124*4882a593Smuzhiyun 
/* Translate a host pointer into the 24-bit address the LANCE sees
 * (the chip only drives 24 address bits; strip the high byte).
 */
#define LANCE_ADDR(x) ((int)(x) & ~0xff000000)
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun /* Load the CSR registers */
/* Load the CSR registers: point CSR1/CSR2 at the init block (low/high
 * 16 bits of its bus address) and set the bus-master config in CSR3.
 * Each value is written by selecting the CSR via RAP, then writing RDP;
 * the write order matters, so this sequence must not be rearranged.
 */
static void load_csrs(struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr = LANCE_ADDR(aib);

	ll->rap = LE_CSR1;
	ll->rdp = (leptr & 0xFFFF);	/* init block address, low 16 bits */
	ll->rap = LE_CSR2;
	ll->rdp = leptr >> 16;		/* init block address, high bits */
	ll->rap = LE_CSR3;
	ll->rdp = lp->busmaster_regval;

	/* Point back to csr0 */
	ll->rap = LE_CSR0;
}
144*4882a593Smuzhiyun 
145*4882a593Smuzhiyun /* Setup the Lance Rx and Tx rings */
lance_init_ring(struct net_device * dev)146*4882a593Smuzhiyun static void lance_init_ring(struct net_device *dev)
147*4882a593Smuzhiyun {
148*4882a593Smuzhiyun 	struct lance_private *lp = netdev_priv(dev);
149*4882a593Smuzhiyun 	volatile struct lance_init_block *ib = lp->init_block;
150*4882a593Smuzhiyun 	volatile struct lance_init_block *aib = lp->lance_init_block;
151*4882a593Smuzhiyun 					/* for LANCE_ADDR computations */
152*4882a593Smuzhiyun 	int leptr;
153*4882a593Smuzhiyun 	int i;
154*4882a593Smuzhiyun 
155*4882a593Smuzhiyun 	/* Lock out other processes while setting up hardware */
156*4882a593Smuzhiyun 	netif_stop_queue(dev);
157*4882a593Smuzhiyun 	lp->rx_new = lp->tx_new = 0;
158*4882a593Smuzhiyun 	lp->rx_old = lp->tx_old = 0;
159*4882a593Smuzhiyun 
160*4882a593Smuzhiyun 	ib->mode = 0;
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun 	/* Copy the ethernet address to the lance init block
163*4882a593Smuzhiyun 	 * Note that on the sparc you need to swap the ethernet address.
164*4882a593Smuzhiyun 	 */
165*4882a593Smuzhiyun 	ib->phys_addr[0] = dev->dev_addr[1];
166*4882a593Smuzhiyun 	ib->phys_addr[1] = dev->dev_addr[0];
167*4882a593Smuzhiyun 	ib->phys_addr[2] = dev->dev_addr[3];
168*4882a593Smuzhiyun 	ib->phys_addr[3] = dev->dev_addr[2];
169*4882a593Smuzhiyun 	ib->phys_addr[4] = dev->dev_addr[5];
170*4882a593Smuzhiyun 	ib->phys_addr[5] = dev->dev_addr[4];
171*4882a593Smuzhiyun 
172*4882a593Smuzhiyun 	/* Setup the Tx ring entries */
173*4882a593Smuzhiyun 	netdev_dbg(dev, "TX rings:\n");
174*4882a593Smuzhiyun 	for (i = 0; i <= 1 << lp->lance_log_tx_bufs; i++) {
175*4882a593Smuzhiyun 		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
176*4882a593Smuzhiyun 		ib->btx_ring[i].tmd0      = leptr;
177*4882a593Smuzhiyun 		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
178*4882a593Smuzhiyun 		ib->btx_ring[i].tmd1_bits = 0;
179*4882a593Smuzhiyun 		ib->btx_ring[i].length    = 0xf000; /* The ones required by tmd2 */
180*4882a593Smuzhiyun 		ib->btx_ring[i].misc      = 0;
181*4882a593Smuzhiyun 		if (i < 3)
182*4882a593Smuzhiyun 			netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
183*4882a593Smuzhiyun 	}
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun 	/* Setup the Rx ring entries */
186*4882a593Smuzhiyun 	netdev_dbg(dev, "RX rings:\n");
187*4882a593Smuzhiyun 	for (i = 0; i < 1 << lp->lance_log_rx_bufs; i++) {
188*4882a593Smuzhiyun 		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);
189*4882a593Smuzhiyun 
190*4882a593Smuzhiyun 		ib->brx_ring[i].rmd0      = leptr;
191*4882a593Smuzhiyun 		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
192*4882a593Smuzhiyun 		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
193*4882a593Smuzhiyun 		ib->brx_ring[i].length    = -RX_BUFF_SIZE | 0xf000;
194*4882a593Smuzhiyun 		ib->brx_ring[i].mblength  = 0;
195*4882a593Smuzhiyun 		if (i < 3)
196*4882a593Smuzhiyun 			netdev_dbg(dev, "%d: 0x%08x\n", i, leptr);
197*4882a593Smuzhiyun 	}
198*4882a593Smuzhiyun 
199*4882a593Smuzhiyun 	/* Setup the initialization block */
200*4882a593Smuzhiyun 
201*4882a593Smuzhiyun 	/* Setup rx descriptor pointer */
202*4882a593Smuzhiyun 	leptr = LANCE_ADDR(&aib->brx_ring);
203*4882a593Smuzhiyun 	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
204*4882a593Smuzhiyun 	ib->rx_ptr = leptr;
205*4882a593Smuzhiyun 	netdev_dbg(dev, "RX ptr: %08x\n", leptr);
206*4882a593Smuzhiyun 
207*4882a593Smuzhiyun 	/* Setup tx descriptor pointer */
208*4882a593Smuzhiyun 	leptr = LANCE_ADDR(&aib->btx_ring);
209*4882a593Smuzhiyun 	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
210*4882a593Smuzhiyun 	ib->tx_ptr = leptr;
211*4882a593Smuzhiyun 	netdev_dbg(dev, "TX ptr: %08x\n", leptr);
212*4882a593Smuzhiyun 
213*4882a593Smuzhiyun 	/* Clear the multicast filter */
214*4882a593Smuzhiyun 	ib->filter[0] = 0;
215*4882a593Smuzhiyun 	ib->filter[1] = 0;
216*4882a593Smuzhiyun }
217*4882a593Smuzhiyun 
/* Issue INIT to the chip and wait for it to finish reading the init
 * block, then start it with interrupts enabled.
 *
 * Returns 0 on success or -EIO if the chip reports an error or does
 * not signal IDON within the polling budget.
 */
static int init_restart_lance(struct lance_private *lp)
{
	volatile struct lance_regs *ll = lp->ll;
	int i;

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_INIT;

	/* Wait for the lance to complete initialization */
	for (i = 0; (i < 100) && !(ll->rdp & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();	/* busy-poll; keep the compiler re-reading rdp */
	if ((i == 100) || (ll->rdp & LE_C0_ERR)) {
		pr_err("unopened after %d ticks, csr0=%04x\n", i, ll->rdp);
		return -EIO;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	ll->rdp = LE_C0_IDON;
	ll->rdp = LE_C0_INEA | LE_C0_STRT;

	return 0;
}
240*4882a593Smuzhiyun 
lance_rx(struct net_device * dev)241*4882a593Smuzhiyun static int lance_rx(struct net_device *dev)
242*4882a593Smuzhiyun {
243*4882a593Smuzhiyun 	struct lance_private *lp = netdev_priv(dev);
244*4882a593Smuzhiyun 	volatile struct lance_init_block *ib = lp->init_block;
245*4882a593Smuzhiyun 	volatile struct lance_regs *ll = lp->ll;
246*4882a593Smuzhiyun 	volatile struct lance_rx_desc *rd;
247*4882a593Smuzhiyun 	unsigned char bits;
248*4882a593Smuzhiyun 
249*4882a593Smuzhiyun #ifdef TEST_HITS
250*4882a593Smuzhiyun 	int i;
251*4882a593Smuzhiyun 	char buf[RX_RING_SIZE + 1];
252*4882a593Smuzhiyun 
253*4882a593Smuzhiyun 	for (i = 0; i < RX_RING_SIZE; i++) {
254*4882a593Smuzhiyun 		char r1_own = ib->brx_ring[i].rmd1_bits & LE_R1_OWN;
255*4882a593Smuzhiyun 		if (i == lp->rx_new)
256*4882a593Smuzhiyun 			buf[i] = r1_own ? '_' : 'X';
257*4882a593Smuzhiyun 		else
258*4882a593Smuzhiyun 			buf[i] = r1_own ? '.' : '1';
259*4882a593Smuzhiyun 	}
260*4882a593Smuzhiyun 	buf[RX_RING_SIZE] = 0;
261*4882a593Smuzhiyun 
262*4882a593Smuzhiyun 	pr_debug("RxRing TestHits: [%s]\n", buf);
263*4882a593Smuzhiyun #endif
264*4882a593Smuzhiyun 
265*4882a593Smuzhiyun 	ll->rdp = LE_C0_RINT | LE_C0_INEA;
266*4882a593Smuzhiyun 	for (rd = &ib->brx_ring[lp->rx_new];
267*4882a593Smuzhiyun 	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
268*4882a593Smuzhiyun 	     rd = &ib->brx_ring[lp->rx_new]) {
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun 		/* We got an incomplete frame? */
271*4882a593Smuzhiyun 		if ((bits & LE_R1_POK) != LE_R1_POK) {
272*4882a593Smuzhiyun 			dev->stats.rx_over_errors++;
273*4882a593Smuzhiyun 			dev->stats.rx_errors++;
274*4882a593Smuzhiyun 			continue;
275*4882a593Smuzhiyun 		} else if (bits & LE_R1_ERR) {
276*4882a593Smuzhiyun 			/* Count only the end frame as a rx error,
277*4882a593Smuzhiyun 			 * not the beginning
278*4882a593Smuzhiyun 			 */
279*4882a593Smuzhiyun 			if (bits & LE_R1_BUF)
280*4882a593Smuzhiyun 				dev->stats.rx_fifo_errors++;
281*4882a593Smuzhiyun 			if (bits & LE_R1_CRC)
282*4882a593Smuzhiyun 				dev->stats.rx_crc_errors++;
283*4882a593Smuzhiyun 			if (bits & LE_R1_OFL)
284*4882a593Smuzhiyun 				dev->stats.rx_over_errors++;
285*4882a593Smuzhiyun 			if (bits & LE_R1_FRA)
286*4882a593Smuzhiyun 				dev->stats.rx_frame_errors++;
287*4882a593Smuzhiyun 			if (bits & LE_R1_EOP)
288*4882a593Smuzhiyun 				dev->stats.rx_errors++;
289*4882a593Smuzhiyun 		} else {
290*4882a593Smuzhiyun 			int len = (rd->mblength & 0xfff) - 4;
291*4882a593Smuzhiyun 			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
292*4882a593Smuzhiyun 
293*4882a593Smuzhiyun 			if (!skb) {
294*4882a593Smuzhiyun 				dev->stats.rx_dropped++;
295*4882a593Smuzhiyun 				rd->mblength = 0;
296*4882a593Smuzhiyun 				rd->rmd1_bits = LE_R1_OWN;
297*4882a593Smuzhiyun 				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
298*4882a593Smuzhiyun 				return 0;
299*4882a593Smuzhiyun 			}
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 			skb_reserve(skb, 2);		/* 16 byte align */
302*4882a593Smuzhiyun 			skb_put(skb, len);		/* make room */
303*4882a593Smuzhiyun 			skb_copy_to_linear_data(skb,
304*4882a593Smuzhiyun 				 (unsigned char *)&ib->rx_buf[lp->rx_new][0],
305*4882a593Smuzhiyun 				 len);
306*4882a593Smuzhiyun 			skb->protocol = eth_type_trans(skb, dev);
307*4882a593Smuzhiyun 			netif_rx(skb);
308*4882a593Smuzhiyun 			dev->stats.rx_packets++;
309*4882a593Smuzhiyun 			dev->stats.rx_bytes += len;
310*4882a593Smuzhiyun 		}
311*4882a593Smuzhiyun 
312*4882a593Smuzhiyun 		/* Return the packet to the pool */
313*4882a593Smuzhiyun 		rd->mblength = 0;
314*4882a593Smuzhiyun 		rd->rmd1_bits = LE_R1_OWN;
315*4882a593Smuzhiyun 		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
316*4882a593Smuzhiyun 	}
317*4882a593Smuzhiyun 	return 0;
318*4882a593Smuzhiyun }
319*4882a593Smuzhiyun 
/* Reap completed Tx descriptors and account errors.
 *
 * Called from the interrupt handler when CSR0.TINT is set.  Walks the
 * ring from tx_old towards tx_new, stopping at the first descriptor
 * still owned by the chip.  On fatal Tx errors (carrier loss with
 * auto-select, buffer/underflow) the whole chip is restarted.
 */
static int lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

	/* csr0 is 2f3 */
	ll->rdp = LE_C0_TINT | LE_C0_INEA;	/* ack Tx interrupt */
	/* csr0 is 73 */

	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring[i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				if (lp->auto_select) {
					/* Toggle the media type and restart
					 * the chip on the other connector
					 */
					lp->tpe = 1 - lp->tpe;
					netdev_err(dev, "Carrier Lost, trying %s\n",
						   lp->tpe ? "TPE" : "AUI");
					/* Stop the lance */
					ll->rap = LE_CSR0;
					ll->rdp = LE_C0_STOP;
					lance_init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off
			 * the transmitter, so restart the adapter
			 */
			if (status & (LE_T3_BUF | LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				netdev_err(dev, "Tx: ERR_BUF|ERR_UFL, restarting\n");
				/* Stop the lance */
				ll->rap = LE_CSR0;
				ll->rdp = LE_C0_STOP;
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/* So we don't count the packet more than once. */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	ll->rdp = LE_C0_TINT | LE_C0_INEA;
	return 0;
}
402*4882a593Smuzhiyun 
lance_tx_buffs_avail(struct lance_private * lp)403*4882a593Smuzhiyun static int lance_tx_buffs_avail(struct lance_private *lp)
404*4882a593Smuzhiyun {
405*4882a593Smuzhiyun 	if (lp->tx_old <= lp->tx_new)
406*4882a593Smuzhiyun 		return lp->tx_old + lp->tx_ring_mod_mask - lp->tx_new;
407*4882a593Smuzhiyun 	return lp->tx_old - lp->tx_new - 1;
408*4882a593Smuzhiyun }
409*4882a593Smuzhiyun 
/* Interrupt handler (shared on IRQ_AMIGA_PORTS).
 *
 * Reads CSR0, bails out with IRQ_NONE if this chip did not interrupt,
 * acknowledges the pending sources, then dispatches to the Rx/Tx ring
 * servicing routines and accounts miscellaneous error conditions.
 */
static irqreturn_t lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int csr0;

	ll->rap = LE_CSR0;		/* LANCE Controller Status */
	csr0 = ll->rdp;

	if (!(csr0 & LE_C0_INTR))	/* Check if any interrupt has */
		return IRQ_NONE;	/* been generated by the Lance. */

	/* Acknowledge all the interrupt sources ASAP; mask out the
	 * control bits so writing them back does not trigger actions
	 */
	ll->rdp = csr0 & ~(LE_C0_INEA | LE_C0_TDMD | LE_C0_STOP | LE_C0_STRT |
			   LE_C0_INIT);

	if (csr0 & LE_C0_ERR) {
		/* Clear the error condition */
		ll->rdp = LE_C0_BABL | LE_C0_ERR | LE_C0_MISS | LE_C0_INEA;
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;       /* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;       /* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		netdev_err(dev, "Bus master arbitration failure, status %04x\n",
			   csr0);
		/* Restart the chip. */
		ll->rdp = LE_C0_STRT;
	}

	/* Wake the queue if lance_tx() freed up ring slots */
	if (netif_queue_stopped(dev) && lance_tx_buffs_avail(lp) > 0)
		netif_wake_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = (LE_C0_BABL | LE_C0_CERR | LE_C0_MISS | LE_C0_MERR |
		   LE_C0_IDON | LE_C0_INEA);
	return IRQ_HANDLED;
}
458*4882a593Smuzhiyun 
/* ndo_open: stop the chip, install the (shared) interrupt handler,
 * program the CSRs and rings, then start the chip.
 *
 * Returns 0 on success or a negative errno from request_irq()/
 * init_restart_lance().
 */
static int lance_open(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int ret;

	/* Stop the Lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	/* Install the Interrupt handler */
	ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		return ret;

	load_csrs(lp);
	lance_init_ring(dev);

	netif_start_queue(dev);

	return init_restart_lance(lp);
}
482*4882a593Smuzhiyun 
/* ndo_stop: quiesce the queue and the deferred-multicast timer, stop
 * the chip, and release the interrupt line.
 */
static int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;

	netif_stop_queue(dev);
	/* make sure no multicast retry fires after we stop the chip */
	del_timer_sync(&lp->multicast_timer);

	/* Stop the card */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	free_irq(IRQ_AMIGA_PORTS, dev);
	return 0;
}
498*4882a593Smuzhiyun 
/* Full chip reset: stop, reload CSRs, rebuild the rings, restart.
 * Used from the Tx-timeout path; returns init_restart_lance()'s status.
 */
static inline int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	int status;

	/* Stop the lance */
	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;

	load_csrs(lp);

	lance_init_ring(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	netif_start_queue(dev);

	status = init_restart_lance(lp);
	netdev_dbg(dev, "Lance restart=%d\n", status);

	return status;
}
520*4882a593Smuzhiyun 
lance_tx_timeout(struct net_device * dev,unsigned int txqueue)521*4882a593Smuzhiyun static void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
522*4882a593Smuzhiyun {
523*4882a593Smuzhiyun 	struct lance_private *lp = netdev_priv(dev);
524*4882a593Smuzhiyun 	volatile struct lance_regs *ll = lp->ll;
525*4882a593Smuzhiyun 
526*4882a593Smuzhiyun 	netdev_err(dev, "transmit timed out, status %04x, reset\n", ll->rdp);
527*4882a593Smuzhiyun 	lance_reset(dev);
528*4882a593Smuzhiyun 	netif_wake_queue(dev);
529*4882a593Smuzhiyun }
530*4882a593Smuzhiyun 
/* ndo_start_xmit: copy the skb into the next Tx buffer and hand the
 * descriptor to the chip.
 *
 * Runs with local interrupts disabled to serialize ring access against
 * the interrupt handler.  The skb is always consumed (freed) here,
 * even when the ring is unexpectedly full, and NETDEV_TX_OK returned.
 */
static netdev_tx_t lance_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_regs *ll = lp->ll;
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen;
	int status = NETDEV_TX_OK;
	unsigned long flags;

	/* pad runts up to the Ethernet minimum; skb is freed on failure */
	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;
	skblen = max_t(unsigned, skb->len, ETH_ZLEN);

	local_irq_save(flags);

	/* ring full: drop (the queue is normally stopped before this) */
	if (!lance_tx_buffs_avail(lp))
		goto out_free;

	/* dump the packet */
	print_hex_dump_debug("skb->data: ", DUMP_PREFIX_NONE, 16, 1, skb->data,
			     64, true);

	entry = lp->tx_new & lp->tx_ring_mod_mask;
	/* negative two's-complement length with required high bits */
	ib->btx_ring[entry].length = (-skblen) | 0xf000;
	ib->btx_ring[entry].misc = 0;

	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK | LE_T1_OWN);
	lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask;
	dev->stats.tx_bytes += skblen;

	if (lance_tx_buffs_avail(lp) <= 0)
		netif_stop_queue(dev);

	/* Kick the lance: transmit now */
	ll->rdp = LE_C0_INEA | LE_C0_TDMD;
 out_free:
	dev_kfree_skb(skb);

	local_irq_restore(flags);

	return status;
}
577*4882a593Smuzhiyun 
578*4882a593Smuzhiyun /* taken from the depca driver */
/* Program the 64-bit logical-address (multicast) filter in the init
 * block from the device's multicast list.  (taken from the depca driver)
 */
static void lance_load_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct netdev_hw_addr *ha;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter[0] = 0xffffffff;
		ib->filter[1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;

	/* Add addresses: hash each address to one of 64 filter bits using
	 * the top 6 bits of the little-endian CRC-32 of the address
	 */
	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(6, ha->addr);
		crc = crc >> 26;
		/* upper 2 bits select the 16-bit word, lower 4 the bit
		 * within it (word layout matches the m68k host here)
		 */
		mcast_table[crc >> 4] |= 1 << (crc & 0xf);
	}
}
604*4882a593Smuzhiyun 
/* ndo_set_rx_mode: apply promiscuous mode / multicast filter changes.
 *
 * The filter lives in the init block, so the chip must be stopped and
 * re-initialized.  If Tx descriptors are still outstanding, defer the
 * update via the multicast timer rather than dropping in-flight frames.
 */
static void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_regs *ll = lp->ll;

	if (!netif_running(dev))
		return;

	if (lp->tx_old != lp->tx_new) {
		/* Tx in flight: retry shortly (4 jiffies) */
		mod_timer(&lp->multicast_timer, jiffies + 4);
		netif_wake_queue(dev);
		return;
	}

	netif_stop_queue(dev);

	ll->rap = LE_CSR0;
	ll->rdp = LE_C0_STOP;
	lance_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);
	netif_wake_queue(dev);
}
636*4882a593Smuzhiyun 
lance_set_multicast_retry(struct timer_list * t)637*4882a593Smuzhiyun static void lance_set_multicast_retry(struct timer_list *t)
638*4882a593Smuzhiyun {
639*4882a593Smuzhiyun 	struct lance_private *lp = from_timer(lp, t, multicast_timer);
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun 	lance_set_multicast(lp->dev);
642*4882a593Smuzhiyun }
643*4882a593Smuzhiyun 
644*4882a593Smuzhiyun static int a2065_init_one(struct zorro_dev *z,
645*4882a593Smuzhiyun 			  const struct zorro_device_id *ent);
646*4882a593Smuzhiyun static void a2065_remove_one(struct zorro_dev *z);
647*4882a593Smuzhiyun 
648*4882a593Smuzhiyun 
/* Zorro board IDs this driver binds to: two Commodore A2065 revisions
 * and the Ameristar clone.
 */
static const struct zorro_device_id a2065_zorro_tbl[] = {
	{ ZORRO_PROD_CBM_A2065_1 },
	{ ZORRO_PROD_CBM_A2065_2 },
	{ ZORRO_PROD_AMERISTAR_A2065 },
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, a2065_zorro_tbl);
656*4882a593Smuzhiyun 
/* Zorro bus driver glue: matches the ID table above and dispatches
 * probe/remove to a2065_init_one()/a2065_remove_one().
 */
static struct zorro_driver a2065_driver = {
	.name		= "a2065",
	.id_table	= a2065_zorro_tbl,
	.probe		= a2065_init_one,
	.remove		= a2065_remove_one,
};
663*4882a593Smuzhiyun 
/* net_device callbacks; the lance_* handlers are defined earlier in
 * this file, address validation/assignment use the generic ethernet
 * helpers.
 */
static const struct net_device_ops lance_netdev_ops = {
	.ndo_open		= lance_open,
	.ndo_stop		= lance_close,
	.ndo_start_xmit		= lance_start_xmit,
	.ndo_tx_timeout		= lance_tx_timeout,
	.ndo_set_rx_mode	= lance_set_multicast,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
673*4882a593Smuzhiyun 
a2065_init_one(struct zorro_dev * z,const struct zorro_device_id * ent)674*4882a593Smuzhiyun static int a2065_init_one(struct zorro_dev *z,
675*4882a593Smuzhiyun 			  const struct zorro_device_id *ent)
676*4882a593Smuzhiyun {
677*4882a593Smuzhiyun 	struct net_device *dev;
678*4882a593Smuzhiyun 	struct lance_private *priv;
679*4882a593Smuzhiyun 	unsigned long board = z->resource.start;
680*4882a593Smuzhiyun 	unsigned long base_addr = board + A2065_LANCE;
681*4882a593Smuzhiyun 	unsigned long mem_start = board + A2065_RAM;
682*4882a593Smuzhiyun 	struct resource *r1, *r2;
683*4882a593Smuzhiyun 	u32 serial;
684*4882a593Smuzhiyun 	int err;
685*4882a593Smuzhiyun 
686*4882a593Smuzhiyun 	r1 = request_mem_region(base_addr, sizeof(struct lance_regs),
687*4882a593Smuzhiyun 				"Am7990");
688*4882a593Smuzhiyun 	if (!r1)
689*4882a593Smuzhiyun 		return -EBUSY;
690*4882a593Smuzhiyun 	r2 = request_mem_region(mem_start, A2065_RAM_SIZE, "RAM");
691*4882a593Smuzhiyun 	if (!r2) {
692*4882a593Smuzhiyun 		release_mem_region(base_addr, sizeof(struct lance_regs));
693*4882a593Smuzhiyun 		return -EBUSY;
694*4882a593Smuzhiyun 	}
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun 	dev = alloc_etherdev(sizeof(struct lance_private));
697*4882a593Smuzhiyun 	if (dev == NULL) {
698*4882a593Smuzhiyun 		release_mem_region(base_addr, sizeof(struct lance_regs));
699*4882a593Smuzhiyun 		release_mem_region(mem_start, A2065_RAM_SIZE);
700*4882a593Smuzhiyun 		return -ENOMEM;
701*4882a593Smuzhiyun 	}
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 	priv = netdev_priv(dev);
704*4882a593Smuzhiyun 
705*4882a593Smuzhiyun 	r1->name = dev->name;
706*4882a593Smuzhiyun 	r2->name = dev->name;
707*4882a593Smuzhiyun 
708*4882a593Smuzhiyun 	serial = be32_to_cpu(z->rom.er_SerialNumber);
709*4882a593Smuzhiyun 	dev->dev_addr[0] = 0x00;
710*4882a593Smuzhiyun 	if (z->id != ZORRO_PROD_AMERISTAR_A2065) {	/* Commodore */
711*4882a593Smuzhiyun 		dev->dev_addr[1] = 0x80;
712*4882a593Smuzhiyun 		dev->dev_addr[2] = 0x10;
713*4882a593Smuzhiyun 	} else {					/* Ameristar */
714*4882a593Smuzhiyun 		dev->dev_addr[1] = 0x00;
715*4882a593Smuzhiyun 		dev->dev_addr[2] = 0x9f;
716*4882a593Smuzhiyun 	}
717*4882a593Smuzhiyun 	dev->dev_addr[3] = (serial >> 16) & 0xff;
718*4882a593Smuzhiyun 	dev->dev_addr[4] = (serial >> 8) & 0xff;
719*4882a593Smuzhiyun 	dev->dev_addr[5] = serial & 0xff;
720*4882a593Smuzhiyun 	dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
721*4882a593Smuzhiyun 	dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
722*4882a593Smuzhiyun 	dev->mem_end = dev->mem_start + A2065_RAM_SIZE;
723*4882a593Smuzhiyun 
724*4882a593Smuzhiyun 	priv->ll = (volatile struct lance_regs *)dev->base_addr;
725*4882a593Smuzhiyun 	priv->init_block = (struct lance_init_block *)dev->mem_start;
726*4882a593Smuzhiyun 	priv->lance_init_block = (struct lance_init_block *)A2065_RAM;
727*4882a593Smuzhiyun 	priv->auto_select = 0;
728*4882a593Smuzhiyun 	priv->busmaster_regval = LE_C3_BSWP;
729*4882a593Smuzhiyun 
730*4882a593Smuzhiyun 	priv->lance_log_rx_bufs = LANCE_LOG_RX_BUFFERS;
731*4882a593Smuzhiyun 	priv->lance_log_tx_bufs = LANCE_LOG_TX_BUFFERS;
732*4882a593Smuzhiyun 	priv->rx_ring_mod_mask = RX_RING_MOD_MASK;
733*4882a593Smuzhiyun 	priv->tx_ring_mod_mask = TX_RING_MOD_MASK;
734*4882a593Smuzhiyun 	priv->dev = dev;
735*4882a593Smuzhiyun 
736*4882a593Smuzhiyun 	dev->netdev_ops = &lance_netdev_ops;
737*4882a593Smuzhiyun 	dev->watchdog_timeo = 5*HZ;
738*4882a593Smuzhiyun 	dev->dma = 0;
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun 	timer_setup(&priv->multicast_timer, lance_set_multicast_retry, 0);
741*4882a593Smuzhiyun 
742*4882a593Smuzhiyun 	err = register_netdev(dev);
743*4882a593Smuzhiyun 	if (err) {
744*4882a593Smuzhiyun 		release_mem_region(base_addr, sizeof(struct lance_regs));
745*4882a593Smuzhiyun 		release_mem_region(mem_start, A2065_RAM_SIZE);
746*4882a593Smuzhiyun 		free_netdev(dev);
747*4882a593Smuzhiyun 		return err;
748*4882a593Smuzhiyun 	}
749*4882a593Smuzhiyun 	zorro_set_drvdata(z, dev);
750*4882a593Smuzhiyun 
751*4882a593Smuzhiyun 	netdev_info(dev, "A2065 at 0x%08lx, Ethernet Address %pM\n",
752*4882a593Smuzhiyun 		    board, dev->dev_addr);
753*4882a593Smuzhiyun 
754*4882a593Smuzhiyun 	return 0;
755*4882a593Smuzhiyun }
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun 
a2065_remove_one(struct zorro_dev * z)758*4882a593Smuzhiyun static void a2065_remove_one(struct zorro_dev *z)
759*4882a593Smuzhiyun {
760*4882a593Smuzhiyun 	struct net_device *dev = zorro_get_drvdata(z);
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun 	unregister_netdev(dev);
763*4882a593Smuzhiyun 	release_mem_region(ZTWO_PADDR(dev->base_addr),
764*4882a593Smuzhiyun 			   sizeof(struct lance_regs));
765*4882a593Smuzhiyun 	release_mem_region(ZTWO_PADDR(dev->mem_start), A2065_RAM_SIZE);
766*4882a593Smuzhiyun 	free_netdev(dev);
767*4882a593Smuzhiyun }
768*4882a593Smuzhiyun 
/* Module entry point: hand the driver to the Zorro bus core, which
 * will call a2065_init_one() for each matching board.
 */
static int __init a2065_init_module(void)
{
	return zorro_register_driver(&a2065_driver);
}
773*4882a593Smuzhiyun 
/* Module exit point: detach from the Zorro bus; the core invokes
 * a2065_remove_one() for every bound device.
 */
static void __exit a2065_cleanup_module(void)
{
	zorro_unregister_driver(&a2065_driver);
}
778*4882a593Smuzhiyun 
/* Standard module registration and license declaration. */
module_init(a2065_init_module);
module_exit(a2065_cleanup_module);

MODULE_LICENSE("GPL");
783