/*
 *  Amiga Linux/m68k Ariadne Ethernet Driver
 *
 *  © Copyright 1995-2003 by Geert Uytterhoeven (geert@linux-m68k.org)
 *			     Peter De Schrijver (p2@mind.be)
 *
 *  ---------------------------------------------------------------------------
 *
 *  This program is based on
 *
 *	lance.c:	An AMD LANCE ethernet driver for linux.
 *			Written 1993-94 by Donald Becker.
 *
 *	Am79C960:	PCnet(tm)-ISA Single-Chip Ethernet Controller
 *			Advanced Micro Devices
 *			Publication #16907, Rev. B, Amendment/0, May 1994
 *
 *	MC68230:	Parallel Interface/Timer (PI/T)
 *			Motorola Semiconductors, December, 1983
 *
 *  ---------------------------------------------------------------------------
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License.  See the file COPYING in the main directory of the Linux
 *  distribution for more details.
 *
 *  ---------------------------------------------------------------------------
 *
 *  The Ariadne is a Zorro-II board made by Village Tronic. It contains:
 *
 *	- an Am79C960 PCnet-ISA Single-Chip Ethernet Controller with both
 *	  10BASE-2 (thin coax) and 10BASE-T (UTP) connectors
 *
 *	- an MC68230 Parallel Interface/Timer configured as 2 parallel ports
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/*#define DEBUG*/

#include <linux/module.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/zorro.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/amigaints.h>
#include <asm/amigahw.h>
#include <asm/irq.h>

#include "ariadne.h"

#ifdef ARIADNE_DEBUG
int ariadne_debug = ARIADNE_DEBUG;
#else
int ariadne_debug = 1;
#endif

/* Macros to Fix Endianness problems */

/* Swap the Bytes in a WORD */
#define swapw(x)	((((x) >> 8) & 0x00ff) | (((x) << 8) & 0xff00))
/* Get the Low BYTE in a WORD */
#define lowb(x)		((x) & 0xff)
/* Get the Swapped High WORD in a LONG */
#define swhighw(x)	((((x) >> 8) & 0xff00) | (((x) >> 24) & 0x00ff))
/* Get the Swapped Low WORD in a LONG */
#define swloww(x)	((((x) << 8) & 0xff00) | (((x) >> 8) & 0x00ff))

/* Transmit/Receive Ring Definitions */

#define TX_RING_SIZE	5
#define RX_RING_SIZE	16

#define PKT_BUF_SIZE	1520

/* Private Device Data */

struct ariadne_private {
	volatile struct TDRE *tx_ring[TX_RING_SIZE];
	volatile struct RDRE *rx_ring[RX_RING_SIZE];
	volatile u_short *tx_buff[TX_RING_SIZE];
	volatile u_short *rx_buff[RX_RING_SIZE];
	int cur_tx, cur_rx;	/* The next free ring entry */
	int dirty_tx;		/* The ring entries to be free()ed */
	char tx_full;
};
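/*
 * Note on ring bookkeeping (derived from the code below): cur_tx, cur_rx
 * and dirty_tx are free-running counters and the ring slot in use is
 * always counter % {TX,RX}_RING_SIZE. ariadne_start_xmit() rebases cur_tx
 * and dirty_tx by TX_RING_SIZE once both have passed the ring size, and
 * ariadne_rx() folds cur_rx back into range, so the counters never grow
 * without bound.
 */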

/* Structure Created in the Ariadne's RAM Buffer */

struct lancedata {
	struct TDRE tx_ring[TX_RING_SIZE];
	struct RDRE rx_ring[RX_RING_SIZE];
	u_short tx_buff[TX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
	u_short rx_buff[RX_RING_SIZE][PKT_BUF_SIZE / sizeof(u_short)];
};
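/*
 * This layout lives in the board's own buffer RAM: the host accesses it at
 * dev->mem_start, while the chip sees it at ARIADNE_RAM. Consequently the
 * descriptor setup below always computes board-side addresses as
 * ARIADNE_RAM + offsetof(struct lancedata, member) and splits them into
 * two byte-swapped 16-bit halves with swloww()/swhighw().
 */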

/* Word-wise copy into the board buffer; an odd trailing byte is placed in
 * the high byte of a final word (big-endian host byte order)
 */
static void memcpyw(volatile u_short *dest, u_short *src, int len)
{
	while (len >= 2) {
		*(dest++) = *(src++);
		len -= 2;
	}
	if (len == 1)
		*dest = (*(u_char *)src) << 8;
}

static void ariadne_init_ring(struct net_device *dev)
{
	struct ariadne_private *priv = netdev_priv(dev);
	volatile struct lancedata *lancedata = (struct lancedata *)dev->mem_start;
	int i;

	netif_stop_queue(dev);

	priv->tx_full = 0;
	priv->cur_rx = priv->cur_tx = 0;
	priv->dirty_tx = 0;

	/* Set up TX Ring */
	for (i = 0; i < TX_RING_SIZE; i++) {
		volatile struct TDRE *t = &lancedata->tx_ring[i];
		t->TMD0 = swloww(ARIADNE_RAM +
				 offsetof(struct lancedata, tx_buff[i]));
		t->TMD1 = swhighw(ARIADNE_RAM +
				  offsetof(struct lancedata, tx_buff[i])) |
			TF_STP | TF_ENP;
		t->TMD2 = swapw((u_short)-PKT_BUF_SIZE);
		t->TMD3 = 0;
		priv->tx_ring[i] = &lancedata->tx_ring[i];
		priv->tx_buff[i] = lancedata->tx_buff[i];
		netdev_dbg(dev, "TX Entry %2d at %p, Buf at %p\n",
			   i, &lancedata->tx_ring[i], lancedata->tx_buff[i]);
	}

	/* Set up RX Ring */
	for (i = 0; i < RX_RING_SIZE; i++) {
		volatile struct RDRE *r = &lancedata->rx_ring[i];
		r->RMD0 = swloww(ARIADNE_RAM +
				 offsetof(struct lancedata, rx_buff[i]));
		r->RMD1 = swhighw(ARIADNE_RAM +
				  offsetof(struct lancedata, rx_buff[i])) |
			RF_OWN;
		r->RMD2 = swapw((u_short)-PKT_BUF_SIZE);
		r->RMD3 = 0x0000;
		priv->rx_ring[i] = &lancedata->rx_ring[i];
		priv->rx_buff[i] = lancedata->rx_buff[i];
		netdev_dbg(dev, "RX Entry %2d at %p, Buf at %p\n",
			   i, &lancedata->rx_ring[i], lancedata->rx_buff[i]);
	}
}

static int ariadne_rx(struct net_device *dev)
{
	struct ariadne_private *priv = netdev_priv(dev);
	int entry = priv->cur_rx % RX_RING_SIZE;
	int i;

	/* If we own the next entry, it's a new packet. Send it up */
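	/* Descriptor handshake (as implemented below): RF_OWN set means the
	 * buffer still belongs to the LANCE; the loop runs while RF_OWN is
	 * clear, and each processed entry is handed back to the chip by
	 * setting RF_OWN again before advancing cur_rx.
	 */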
	while (!(lowb(priv->rx_ring[entry]->RMD1) & RF_OWN)) {
		int status = lowb(priv->rx_ring[entry]->RMD1);

		if (status != (RF_STP | RF_ENP)) {	/* There was an error */
			/* There is a tricky error noted by
			 * John Murphy <murf@perftech.com> to Russ Nelson:
			 * Even with full-sized buffers it's possible for a
			 * jabber packet to use two buffers, with only the
			 * last correctly noting the error
			 */
			/* Only count a general error at the end of a packet */
			if (status & RF_ENP)
				dev->stats.rx_errors++;
			if (status & RF_FRAM)
				dev->stats.rx_frame_errors++;
			if (status & RF_OFLO)
				dev->stats.rx_over_errors++;
			if (status & RF_CRC)
				dev->stats.rx_crc_errors++;
			if (status & RF_BUFF)
				dev->stats.rx_fifo_errors++;
			priv->rx_ring[entry]->RMD1 &= 0xff00 | RF_STP | RF_ENP;
		} else {
			/* Malloc up new buffer, compatible with net-3 */
			short pkt_len = swapw(priv->rx_ring[entry]->RMD3);
			struct sk_buff *skb;

			skb = netdev_alloc_skb(dev, pkt_len + 2);
			if (skb == NULL) {
				for (i = 0; i < RX_RING_SIZE; i++)
					if (lowb(priv->rx_ring[(entry + i) % RX_RING_SIZE]->RMD1) & RF_OWN)
						break;

				if (i > RX_RING_SIZE - 2) {
					dev->stats.rx_dropped++;
					priv->rx_ring[entry]->RMD1 |= RF_OWN;
					priv->cur_rx++;
				}
				break;
			}

			skb_reserve(skb, 2);	/* 16 byte align */
			skb_put(skb, pkt_len);	/* Make room */
			skb_copy_to_linear_data(skb,
						(const void *)priv->rx_buff[entry],
						pkt_len);
			skb->protocol = eth_type_trans(skb, dev);
			netdev_dbg(dev, "RX pkt type 0x%04x from %pM to %pM data %p len %u\n",
				   ((u_short *)skb->data)[6],
				   skb->data + 6, skb->data,
				   skb->data, skb->len);

			netif_rx(skb);
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pkt_len;
		}

		priv->rx_ring[entry]->RMD1 |= RF_OWN;
		entry = (++priv->cur_rx) % RX_RING_SIZE;
	}

	priv->cur_rx = priv->cur_rx % RX_RING_SIZE;

	/* We should check that at least two ring entries are free.
	 * If not, we should free one and mark stats->rx_dropped++
	 */

	return 0;
}

static irqreturn_t ariadne_interrupt(int irq, void *data)
{
	struct net_device *dev = (struct net_device *)data;
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
	struct ariadne_private *priv;
	int csr0, boguscnt;
	int handled = 0;

	lance->RAP = CSR0;	/* PCnet-ISA Controller Status */

	if (!(lance->RDP & INTR))	/* Check if any interrupt has been */
		return IRQ_NONE;	/* generated by the board */

	priv = netdev_priv(dev);

	boguscnt = 10;
	while ((csr0 = lance->RDP) & (ERR | RINT | TINT) && --boguscnt >= 0) {
		/* Acknowledge all of the current interrupt sources ASAP */
		lance->RDP = csr0 & ~(INEA | TDMD | STOP | STRT | INIT);
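		/* Writing the latched status bits back to CSR0 clears them
		 * (write-one-to-clear); the control bits INEA, TDMD, STOP,
		 * STRT and INIT are masked out so the write does not also
		 * restart, stop or re-initialise the chip.
		 */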

#ifdef DEBUG
		if (ariadne_debug > 5) {
			netdev_dbg(dev, "interrupt csr0=%#02x new csr=%#02x [",
				   csr0, lance->RDP);
			if (csr0 & INTR)
				pr_cont(" INTR");
			if (csr0 & INEA)
				pr_cont(" INEA");
			if (csr0 & RXON)
				pr_cont(" RXON");
			if (csr0 & TXON)
				pr_cont(" TXON");
			if (csr0 & TDMD)
				pr_cont(" TDMD");
			if (csr0 & STOP)
				pr_cont(" STOP");
			if (csr0 & STRT)
				pr_cont(" STRT");
			if (csr0 & INIT)
				pr_cont(" INIT");
			if (csr0 & ERR)
				pr_cont(" ERR");
			if (csr0 & BABL)
				pr_cont(" BABL");
			if (csr0 & CERR)
				pr_cont(" CERR");
			if (csr0 & MISS)
				pr_cont(" MISS");
			if (csr0 & MERR)
				pr_cont(" MERR");
			if (csr0 & RINT)
				pr_cont(" RINT");
			if (csr0 & TINT)
				pr_cont(" TINT");
			if (csr0 & IDON)
				pr_cont(" IDON");
			pr_cont(" ]\n");
		}
#endif

		if (csr0 & RINT) {	/* Rx interrupt */
			handled = 1;
			ariadne_rx(dev);
		}

		if (csr0 & TINT) {	/* Tx-done interrupt */
			int dirty_tx = priv->dirty_tx;

			handled = 1;
			while (dirty_tx < priv->cur_tx) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = lowb(priv->tx_ring[entry]->TMD1);

				if (status & TF_OWN)
					break;	/* It still hasn't been Txed */

				priv->tx_ring[entry]->TMD1 &= 0xff00;

				if (status & TF_ERR) {
					/* There was a major error, log it */
					int err_status = priv->tx_ring[entry]->TMD3;
					dev->stats.tx_errors++;
					if (err_status & EF_RTRY)
						dev->stats.tx_aborted_errors++;
					if (err_status & EF_LCAR)
						dev->stats.tx_carrier_errors++;
					if (err_status & EF_LCOL)
						dev->stats.tx_window_errors++;
					if (err_status & EF_UFLO) {
						/* Ackk! On FIFO errors the Tx unit is turned off! */
						dev->stats.tx_fifo_errors++;
						/* Remove this verbosity later! */
						netdev_err(dev, "Tx FIFO error! Status %04x\n",
							   csr0);
						/* Restart the chip */
						lance->RDP = STRT;
					}
				} else {
					if (status & (TF_MORE | TF_ONE))
						dev->stats.collisions++;
					dev->stats.tx_packets++;
				}
				dirty_tx++;
			}

#ifndef final_version
			if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
				netdev_err(dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
					   dirty_tx, priv->cur_tx,
					   priv->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (priv->tx_full && netif_queue_stopped(dev) &&
			    dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) {
				/* The ring is no longer full */
				priv->tx_full = 0;
				netif_wake_queue(dev);
			}

			priv->dirty_tx = dirty_tx;
		}

		/* Log misc errors */
		if (csr0 & BABL) {
			handled = 1;
			dev->stats.tx_errors++;	/* Tx babble */
		}
		if (csr0 & MISS) {
			handled = 1;
			dev->stats.rx_errors++;	/* Missed a Rx frame */
		}
		if (csr0 & MERR) {
			handled = 1;
			netdev_err(dev, "Bus master arbitration failure, status %04x\n",
				   csr0);
			/* Restart the chip */
			lance->RDP = STRT;
		}
	}

	/* Clear any other interrupt, and set interrupt enable */
	lance->RAP = CSR0;	/* PCnet-ISA Controller Status */
	lance->RDP = INEA | BABL | CERR | MISS | MERR | IDON;

	if (ariadne_debug > 4)
		netdev_dbg(dev, "exiting interrupt, csr%d=%#04x\n",
			   lance->RAP, lance->RDP);

	return IRQ_RETVAL(handled);
}

static int ariadne_open(struct net_device *dev)
{
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
	u_short in;
	u_long version;
	int i;

	/* Reset the LANCE */
	in = lance->Reset;

	/* Stop the LANCE */
	lance->RAP = CSR0;	/* PCnet-ISA Controller Status */
	lance->RDP = STOP;

	/* Check the LANCE version */
	lance->RAP = CSR88;	/* Chip ID */
	version = swapw(lance->RDP);
	lance->RAP = CSR89;	/* Chip ID */
	version |= swapw(lance->RDP) << 16;
	if ((version & 0x00000fff) != 0x00000003) {
		pr_warn("Couldn't find AMD Ethernet Chip\n");
		return -EAGAIN;
	}
	if ((version & 0x0ffff000) != 0x00003000) {
		pr_warn("Couldn't find Am79C960 (Wrong part number = %ld)\n",
			(version & 0x0ffff000) >> 12);
		return -EAGAIN;
	}

	netdev_dbg(dev, "Am79C960 (PCnet-ISA) Revision %ld\n",
		   (version & 0xf0000000) >> 28);

	ariadne_init_ring(dev);

	/* Miscellaneous Stuff */
	lance->RAP = CSR3;	/* Interrupt Masks and Deferral Control */
	lance->RDP = 0x0000;
	lance->RAP = CSR4;	/* Test and Features Control */
	lance->RDP = DPOLL | APAD_XMT | MFCOM | RCVCCOM | TXSTRTM | JABM;

	/* Set the Multicast Table */
	lance->RAP = CSR8;	/* Logical Address Filter, LADRF[15:0] */
	lance->RDP = 0x0000;
	lance->RAP = CSR9;	/* Logical Address Filter, LADRF[31:16] */
	lance->RDP = 0x0000;
	lance->RAP = CSR10;	/* Logical Address Filter, LADRF[47:32] */
	lance->RDP = 0x0000;
	lance->RAP = CSR11;	/* Logical Address Filter, LADRF[63:48] */
	lance->RDP = 0x0000;

	/* Set the Ethernet Hardware Address */
	lance->RAP = CSR12;	/* Physical Address Register, PADR[15:0] */
	lance->RDP = ((u_short *)&dev->dev_addr[0])[0];
	lance->RAP = CSR13;	/* Physical Address Register, PADR[31:16] */
	lance->RDP = ((u_short *)&dev->dev_addr[0])[1];
	lance->RAP = CSR14;	/* Physical Address Register, PADR[47:32] */
	lance->RDP = ((u_short *)&dev->dev_addr[0])[2];

	/* Set the Init Block Mode */
	lance->RAP = CSR15;	/* Mode Register */
	lance->RDP = 0x0000;

	/* Set the Transmit Descriptor Ring Pointer */
	lance->RAP = CSR30;	/* Base Address of Transmit Ring */
	lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));
	lance->RAP = CSR31;	/* Base Address of Transmit Ring */
	lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, tx_ring));

	/* Set the Receive Descriptor Ring Pointer */
	lance->RAP = CSR24;	/* Base Address of Receive Ring */
	lance->RDP = swloww(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));
	lance->RAP = CSR25;	/* Base Address of Receive Ring */
	lance->RDP = swhighw(ARIADNE_RAM + offsetof(struct lancedata, rx_ring));

	/* Set the Number of RX and TX Ring Entries */
	lance->RAP = CSR76;	/* Receive Ring Length */
	lance->RDP = swapw(((u_short)-RX_RING_SIZE));
	lance->RAP = CSR78;	/* Transmit Ring Length */
	lance->RDP = swapw(((u_short)-TX_RING_SIZE));

	/* Enable Media Interface Port Auto Select (10BASE-2/10BASE-T) */
	lance->RAP = ISACSR2;	/* Miscellaneous Configuration */
	lance->IDP = ASEL;

	/* LED Control */
	lance->RAP = ISACSR5;	/* LED1 Status */
	lance->IDP = PSE | XMTE;
	lance->RAP = ISACSR6;	/* LED2 Status */
	lance->IDP = PSE | COLE;
	lance->RAP = ISACSR7;	/* LED3 Status */
	lance->IDP = PSE | RCVE;
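	/* Presumably (Am79C960 LED control registers): each ISACSR above
	 * enables one event source per LED (transmit, collision, receive),
	 * and PSE stretches the pulse so brief events remain visible.
	 */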

	netif_start_queue(dev);

	i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, IRQF_SHARED,
			dev->name, dev);
	if (i)
		return i;

	lance->RAP = CSR0;	/* PCnet-ISA Controller Status */
	lance->RDP = INEA | STRT;

	return 0;
}

static int ariadne_close(struct net_device *dev)
{
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;

	netif_stop_queue(dev);

	lance->RAP = CSR112;	/* Missed Frame Count */
	dev->stats.rx_missed_errors = swapw(lance->RDP);
	lance->RAP = CSR0;	/* PCnet-ISA Controller Status */

	if (ariadne_debug > 1) {
		netdev_dbg(dev, "Shutting down ethercard, status was %02x\n",
			   lance->RDP);
		netdev_dbg(dev, "%lu packets missed\n",
			   dev->stats.rx_missed_errors);
	}

	/* We stop the LANCE here -- it occasionally polls memory if we don't */
	lance->RDP = STOP;

	free_irq(IRQ_AMIGA_PORTS, dev);

	return 0;
}

static inline void ariadne_reset(struct net_device *dev)
{
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;

	lance->RAP = CSR0;	/* PCnet-ISA Controller Status */
	lance->RDP = STOP;
	ariadne_init_ring(dev);
	lance->RDP = INEA | STRT;
	netif_start_queue(dev);
}

static void ariadne_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;

	netdev_err(dev, "transmit timed out, status %04x, resetting\n",
		   lance->RDP);
	ariadne_reset(dev);
	netif_wake_queue(dev);
}

static netdev_tx_t ariadne_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct ariadne_private *priv = netdev_priv(dev);
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
	int entry;
	unsigned long flags;
	int len = skb->len;

#if 0
	if (ariadne_debug > 3) {
		lance->RAP = CSR0;	/* PCnet-ISA Controller Status */
		netdev_dbg(dev, "%s: csr0 %04x\n", __func__, lance->RDP);
		lance->RDP = 0x0000;
	}
#endif

	/* FIXME: is the 79C960 new enough to do its own padding right ? */
	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		len = ETH_ZLEN;
	}

	/* Fill in a Tx ring entry */

	netdev_dbg(dev, "TX pkt type 0x%04x from %pM to %pM data %p len %u\n",
		   ((u_short *)skb->data)[6],
		   skb->data + 6, skb->data,
		   skb->data, skb->len);

	local_irq_save(flags);

	entry = priv->cur_tx % TX_RING_SIZE;

	/* Caution: the write order is important here, set the base address
	 * with the "ownership" bits last
	 */
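	/* Rationale (as implied above): once TF_OWN is set in TMD1 the chip
	 * may fetch the descriptor and buffer at any time, so the buffer
	 * copy and the TMD2/TMD3 writes below must complete before TMD1
	 * hands the entry over.
	 */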

	priv->tx_ring[entry]->TMD2 = swapw((u_short)-skb->len);
	priv->tx_ring[entry]->TMD3 = 0x0000;
	memcpyw(priv->tx_buff[entry], (u_short *)skb->data, len);

#ifdef DEBUG
	print_hex_dump(KERN_DEBUG, "tx_buff: ", DUMP_PREFIX_OFFSET, 16, 1,
		       (void *)priv->tx_buff[entry],
		       skb->len > 64 ? 64 : skb->len, true);
#endif

	priv->tx_ring[entry]->TMD1 = (priv->tx_ring[entry]->TMD1 & 0xff00) |
				     TF_OWN | TF_STP | TF_ENP;

	dev_kfree_skb(skb);

	priv->cur_tx++;
	if ((priv->cur_tx >= TX_RING_SIZE) &&
	    (priv->dirty_tx >= TX_RING_SIZE)) {

		netdev_dbg(dev, "*** Subtracting TX_RING_SIZE from cur_tx (%d) and dirty_tx (%d)\n",
			   priv->cur_tx, priv->dirty_tx);

		priv->cur_tx -= TX_RING_SIZE;
		priv->dirty_tx -= TX_RING_SIZE;
	}
	dev->stats.tx_bytes += len;

	/* Trigger an immediate send poll */
	lance->RAP = CSR0;	/* PCnet-ISA Controller Status */
	lance->RDP = INEA | TDMD;

	if (lowb(priv->tx_ring[(entry + 1) % TX_RING_SIZE]->TMD1) != 0) {
		netif_stop_queue(dev);
		priv->tx_full = 1;
	}
	local_irq_restore(flags);

	return NETDEV_TX_OK;
}

static struct net_device_stats *ariadne_get_stats(struct net_device *dev)
{
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
	short saved_addr;
	unsigned long flags;

	local_irq_save(flags);
	saved_addr = lance->RAP;
	lance->RAP = CSR112;	/* Missed Frame Count */
	dev->stats.rx_missed_errors = swapw(lance->RDP);
	lance->RAP = saved_addr;
	local_irq_restore(flags);

	return &dev->stats;
}

/* Set or clear the multicast filter for this adaptor.
 * num_addrs == -1	Promiscuous mode, receive all packets
 * num_addrs == 0	Normal mode, clear multicast list
 * num_addrs > 0	Multicast mode, receive normal and MC packets,
 *			and do best-effort filtering.
 */
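/*
 * Note (from the code below): the per-address logical address filter is not
 * actually programmed; with any multicast addresses present the LADRF is
 * simply set to all ones, so multicast filtering is left to the upper
 * layers, as the inline comment in the function also states.
 */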
static void set_multicast_list(struct net_device *dev)
{
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;

	if (!netif_running(dev))
		return;

	netif_stop_queue(dev);

	/* We take the simple way out: promiscuous mode when requested,
	 * otherwise an all-pass logical address filter
	 */
	lance->RAP = CSR0;	/* PCnet-ISA Controller Status */
	lance->RDP = STOP;	/* Temporarily stop the lance */
	ariadne_init_ring(dev);

	if (dev->flags & IFF_PROMISC) {
		lance->RAP = CSR15;	/* Mode Register */
		lance->RDP = PROM;	/* Set promiscuous mode */
	} else {
		short multicast_table[4];
		int num_addrs = netdev_mc_count(dev);
		int i;
		/* We don't use the multicast table,
		 * but rely on upper-layer filtering
		 */
		memset(multicast_table, (num_addrs == 0) ? 0 : -1,
		       sizeof(multicast_table));
		for (i = 0; i < 4; i++) {
			lance->RAP = CSR8 + (i << 8);
			/* Logical Address Filter */
			lance->RDP = swapw(multicast_table[i]);
		}
		lance->RAP = CSR15;	/* Mode Register */
		lance->RDP = 0x0000;	/* Unset promiscuous mode */
	}

	lance->RAP = CSR0;	/* PCnet-ISA Controller Status */
	lance->RDP = INEA | STRT | IDON;	/* Resume normal operation */

	netif_wake_queue(dev);
}

static void ariadne_remove_one(struct zorro_dev *z)
{
	struct net_device *dev = zorro_get_drvdata(z);

	unregister_netdev(dev);
	release_mem_region(ZTWO_PADDR(dev->base_addr), sizeof(struct Am79C960));
	release_mem_region(ZTWO_PADDR(dev->mem_start), ARIADNE_RAM_SIZE);
	free_netdev(dev);
}

static const struct zorro_device_id ariadne_zorro_tbl[] = {
	{ ZORRO_PROD_VILLAGE_TRONIC_ARIADNE },
	{ 0 }
};
MODULE_DEVICE_TABLE(zorro, ariadne_zorro_tbl);

static const struct net_device_ops ariadne_netdev_ops = {
	.ndo_open		= ariadne_open,
	.ndo_stop		= ariadne_close,
	.ndo_start_xmit		= ariadne_start_xmit,
	.ndo_tx_timeout		= ariadne_tx_timeout,
	.ndo_get_stats		= ariadne_get_stats,
	.ndo_set_rx_mode	= set_multicast_list,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};

static int ariadne_init_one(struct zorro_dev *z,
			    const struct zorro_device_id *ent)
{
	unsigned long board = z->resource.start;
	unsigned long base_addr = board + ARIADNE_LANCE;
	unsigned long mem_start = board + ARIADNE_RAM;
	struct resource *r1, *r2;
	struct net_device *dev;
	u32 serial;
	int err;

	r1 = request_mem_region(base_addr, sizeof(struct Am79C960), "Am79C960");
	if (!r1)
		return -EBUSY;
	r2 = request_mem_region(mem_start, ARIADNE_RAM_SIZE, "RAM");
	if (!r2) {
		release_mem_region(base_addr, sizeof(struct Am79C960));
		return -EBUSY;
	}

	dev = alloc_etherdev(sizeof(struct ariadne_private));
	if (dev == NULL) {
		release_mem_region(base_addr, sizeof(struct Am79C960));
		release_mem_region(mem_start, ARIADNE_RAM_SIZE);
		return -ENOMEM;
	}

	r1->name = dev->name;
	r2->name = dev->name;

	serial = be32_to_cpu(z->rom.er_SerialNumber);
	dev->dev_addr[0] = 0x00;
	dev->dev_addr[1] = 0x60;
	dev->dev_addr[2] = 0x30;
	dev->dev_addr[3] = (serial >> 16) & 0xff;
	dev->dev_addr[4] = (serial >> 8) & 0xff;
	dev->dev_addr[5] = serial & 0xff;
	dev->base_addr = (unsigned long)ZTWO_VADDR(base_addr);
	dev->mem_start = (unsigned long)ZTWO_VADDR(mem_start);
	dev->mem_end = dev->mem_start + ARIADNE_RAM_SIZE;

	dev->netdev_ops = &ariadne_netdev_ops;
	dev->watchdog_timeo = 5 * HZ;

	err = register_netdev(dev);
	if (err) {
		release_mem_region(base_addr, sizeof(struct Am79C960));
		release_mem_region(mem_start, ARIADNE_RAM_SIZE);
		free_netdev(dev);
		return err;
	}
	zorro_set_drvdata(z, dev);

	netdev_info(dev, "Ariadne at 0x%08lx, Ethernet Address %pM\n",
		    board, dev->dev_addr);

	return 0;
}

static struct zorro_driver ariadne_driver = {
	.name		= "ariadne",
	.id_table	= ariadne_zorro_tbl,
	.probe		= ariadne_init_one,
	.remove		= ariadne_remove_one,
};

static int __init ariadne_init_module(void)
{
	return zorro_register_driver(&ariadne_driver);
}

static void __exit ariadne_cleanup_module(void)
{
	zorro_unregister_driver(&ariadne_driver);
}

module_init(ariadne_init_module);
module_exit(ariadne_cleanup_module);

MODULE_LICENSE("GPL");