xref: /OK3568_Linux_fs/kernel/drivers/net/ethernet/amd/7990.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * 7990.c -- LANCE ethernet IC generic routines.
4*4882a593Smuzhiyun  * This is an attempt to separate out the bits of various ethernet
5*4882a593Smuzhiyun  * drivers that are common because they all use the AMD 7990 LANCE
6*4882a593Smuzhiyun  * (Local Area Network Controller for Ethernet) chip.
7*4882a593Smuzhiyun  *
8*4882a593Smuzhiyun  * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk>
9*4882a593Smuzhiyun  *
10*4882a593Smuzhiyun  * Most of this stuff was obtained by looking at other LANCE drivers,
11*4882a593Smuzhiyun  * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful.
12*4882a593Smuzhiyun  * NB: this was made easy by the fact that Jes Sorensen had cleaned up
13*4882a593Smuzhiyun  * most of a2025 and sunlance with the aim of merging them, so the
14*4882a593Smuzhiyun  * common code was pretty obvious.
15*4882a593Smuzhiyun  */
16*4882a593Smuzhiyun #include <linux/crc32.h>
17*4882a593Smuzhiyun #include <linux/delay.h>
18*4882a593Smuzhiyun #include <linux/errno.h>
19*4882a593Smuzhiyun #include <linux/netdevice.h>
20*4882a593Smuzhiyun #include <linux/etherdevice.h>
21*4882a593Smuzhiyun #include <linux/module.h>
22*4882a593Smuzhiyun #include <linux/kernel.h>
23*4882a593Smuzhiyun #include <linux/types.h>
24*4882a593Smuzhiyun #include <linux/fcntl.h>
25*4882a593Smuzhiyun #include <linux/interrupt.h>
26*4882a593Smuzhiyun #include <linux/ioport.h>
27*4882a593Smuzhiyun #include <linux/in.h>
28*4882a593Smuzhiyun #include <linux/route.h>
29*4882a593Smuzhiyun #include <linux/string.h>
30*4882a593Smuzhiyun #include <linux/skbuff.h>
31*4882a593Smuzhiyun #include <linux/pgtable.h>
32*4882a593Smuzhiyun #include <asm/irq.h>
33*4882a593Smuzhiyun /* Used for the temporal inet entries and routing */
34*4882a593Smuzhiyun #include <linux/socket.h>
35*4882a593Smuzhiyun #include <linux/bitops.h>
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun #include <asm/io.h>
38*4882a593Smuzhiyun #include <asm/dma.h>
39*4882a593Smuzhiyun #ifdef CONFIG_HP300
40*4882a593Smuzhiyun #include <asm/blinken.h>
41*4882a593Smuzhiyun #endif
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun #include "7990.h"
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun #define WRITERAP(lp, x)	out_be16(lp->base + LANCE_RAP, (x))
46*4882a593Smuzhiyun #define WRITERDP(lp, x)	out_be16(lp->base + LANCE_RDP, (x))
47*4882a593Smuzhiyun #define READRDP(lp)	in_be16(lp->base + LANCE_RDP)
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_HPLANCE)
50*4882a593Smuzhiyun #include "hplance.h"
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun #undef WRITERAP
53*4882a593Smuzhiyun #undef WRITERDP
54*4882a593Smuzhiyun #undef READRDP
55*4882a593Smuzhiyun 
56*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_MVME147_NET)
57*4882a593Smuzhiyun 
58*4882a593Smuzhiyun /* Lossage Factor Nine, Mr Sulu. */
59*4882a593Smuzhiyun #define WRITERAP(lp, x)	(lp->writerap(lp, x))
60*4882a593Smuzhiyun #define WRITERDP(lp, x)	(lp->writerdp(lp, x))
61*4882a593Smuzhiyun #define READRDP(lp)	(lp->readrdp(lp))
62*4882a593Smuzhiyun 
63*4882a593Smuzhiyun #else
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun /* These inlines can be used if only CONFIG_HPLANCE is defined */
/* Write the LANCE Register Address Port, repeating the MMIO write until
 * the HP bus-interface status byte reports LE_ACK (access accepted).
 */
static inline void WRITERAP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}
72*4882a593Smuzhiyun 
/* Write the LANCE Register Data Port, repeating until the HP bus
 * interface acknowledges the access (LE_ACK set in the status byte).
 */
static inline void WRITERDP(struct lance_private *lp, __u16 value)
{
	do {
		out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
}
79*4882a593Smuzhiyun 
/* Read the LANCE Register Data Port, repeating until the HP bus
 * interface acknowledges; returns the value from the acknowledged read.
 */
static inline __u16 READRDP(struct lance_private *lp)
{
	__u16 value;
	do {
		value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP);
	} while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0);
	return value;
}
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun #endif
90*4882a593Smuzhiyun #endif /* IS_ENABLED(CONFIG_HPLANCE) */
91*4882a593Smuzhiyun 
92*4882a593Smuzhiyun /* debugging output macros, various flavours */
93*4882a593Smuzhiyun /* #define TEST_HITS */
94*4882a593Smuzhiyun #ifdef UNDEF
95*4882a593Smuzhiyun #define PRINT_RINGS() \
96*4882a593Smuzhiyun do { \
97*4882a593Smuzhiyun 	int t; \
98*4882a593Smuzhiyun 	for (t = 0; t < RX_RING_SIZE; t++) { \
99*4882a593Smuzhiyun 		printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n", \
100*4882a593Smuzhiyun 		       t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0, \
101*4882a593Smuzhiyun 		       ib->brx_ring[t].length, \
102*4882a593Smuzhiyun 		       ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits); \
103*4882a593Smuzhiyun 	} \
104*4882a593Smuzhiyun 	for (t = 0; t < TX_RING_SIZE; t++) { \
105*4882a593Smuzhiyun 		printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n", \
106*4882a593Smuzhiyun 		       t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0, \
107*4882a593Smuzhiyun 		       ib->btx_ring[t].length, \
108*4882a593Smuzhiyun 		       ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits); \
109*4882a593Smuzhiyun 	} \
110*4882a593Smuzhiyun } while (0)
111*4882a593Smuzhiyun #else
112*4882a593Smuzhiyun #define PRINT_RINGS()
113*4882a593Smuzhiyun #endif
114*4882a593Smuzhiyun 
115*4882a593Smuzhiyun /* Load the CSR registers. The LANCE has to be STOPped when we do this! */
static void load_csrs(struct lance_private *lp)
{
	volatile struct lance_init_block *aib = lp->lance_init_block;
	int leptr;

	/* Bus address of the init block as the LANCE will see it. */
	leptr = LANCE_ADDR(aib);

	WRITERAP(lp, LE_CSR1);                    /* load address of init block */
	WRITERDP(lp, leptr & 0xFFFF);             /* CSR1 = low 16 address bits */
	WRITERAP(lp, LE_CSR2);
	WRITERDP(lp, leptr >> 16);                /* CSR2 = high address bits */
	WRITERAP(lp, LE_CSR3);
	WRITERDP(lp, lp->busmaster_regval);       /* set byteswap/ALEctrl/byte ctrl */

	/* Point back to csr0 so subsequent RDP accesses hit the status reg */
	WRITERAP(lp, LE_CSR0);
}
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun /* #define to 0 or 1 appropriately */
135*4882a593Smuzhiyun #define DEBUG_IRING 0
136*4882a593Smuzhiyun /* Set up the Lance Rx and Tx rings and the init block */
static void lance_init_ring(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */
	int leptr;
	int i;

	/* aib is the bus-side view of the same init block; LANCE_ADDR()
	 * converts its member addresses into what we program into the chip.
	 */
	aib = lp->lance_init_block;

	/* Reset ring cursors: producer (new) and consumer (old) indices. */
	lp->rx_new = lp->tx_new = 0;
	lp->rx_old = lp->tx_old = 0;

	/* NOTE(review): LE_MO_PROM enables promiscuous receive, not "normal"
	 * mode as the old comment claimed.  lance_set_multicast() clears it
	 * again unless IFF_PROMISC is set, so the device is promiscuous only
	 * between init and the first set_multicast — confirm this window is
	 * intentional before changing it.
	 */
	ib->mode = LE_MO_PROM;

	/* Copy the ethernet address to the lance init block
	 * Notice that we do a byteswap if we're big endian.
	 * [I think this is the right criterion; at least, sunlance,
	 * a2065 and atarilance do the byteswap and lance.c (PC) doesn't.
	 * However, the datasheet says that the BSWAP bit doesn't affect
	 * the init block, so surely it should be low byte first for
	 * everybody? Um.]
	 * We could define the ib->physaddr as three 16bit values and
	 * use (addr[1] << 8) | addr[0] & co, but this is more efficient.
	 */
#ifdef __BIG_ENDIAN
	ib->phys_addr[0] = dev->dev_addr[1];
	ib->phys_addr[1] = dev->dev_addr[0];
	ib->phys_addr[2] = dev->dev_addr[3];
	ib->phys_addr[3] = dev->dev_addr[2];
	ib->phys_addr[4] = dev->dev_addr[5];
	ib->phys_addr[5] = dev->dev_addr[4];
#else
	for (i = 0; i < 6; i++)
	       ib->phys_addr[i] = dev->dev_addr[i];
#endif

	if (DEBUG_IRING)
		printk("TX rings:\n");

	lp->tx_full = 0;
	/* Setup the Tx ring entries: point each descriptor at its buffer,
	 * clear ownership (host owns all Tx slots initially).
	 */
	for (i = 0; i < (1 << lp->lance_log_tx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->tx_buf[i][0]);
		ib->btx_ring[i].tmd0      = leptr;
		ib->btx_ring[i].tmd1_hadr = leptr >> 16;
		ib->btx_ring[i].tmd1_bits = 0;
		ib->btx_ring[i].length    = 0xf000; /* The ones required by tmd2 */
		ib->btx_ring[i].misc      = 0;
		if (DEBUG_IRING)
			printk("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the Rx ring entries: hand every Rx slot to the chip
	 * (LE_R1_OWN) with a negative two's-complement buffer length.
	 */
	if (DEBUG_IRING)
		printk("RX rings:\n");
	for (i = 0; i < (1 << lp->lance_log_rx_bufs); i++) {
		leptr = LANCE_ADDR(&aib->rx_buf[i][0]);

		ib->brx_ring[i].rmd0      = leptr;
		ib->brx_ring[i].rmd1_hadr = leptr >> 16;
		ib->brx_ring[i].rmd1_bits = LE_R1_OWN;
		/* 0xf000 == bits that must be one (reserved, presumably) */
		ib->brx_ring[i].length    = -RX_BUFF_SIZE | 0xf000;
		ib->brx_ring[i].mblength  = 0;
		if (DEBUG_IRING)
			printk("%d: 0x%8.8x\n", i, leptr);
	}

	/* Setup the initialization block */

	/* Setup rx descriptor pointer: ring length (log2, in the top 3 bits)
	 * ORed with the high address bits; low bits go in rx_ptr.
	 */
	leptr = LANCE_ADDR(&aib->brx_ring);
	ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16);
	ib->rx_ptr = leptr;
	if (DEBUG_IRING)
		printk("RX ptr: %8.8x\n", leptr);

	/* Setup tx descriptor pointer, same encoding as rx above */
	leptr = LANCE_ADDR(&aib->btx_ring);
	ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16);
	ib->tx_ptr = leptr;
	if (DEBUG_IRING)
		printk("TX ptr: %8.8x\n", leptr);

	/* Clear the multicast filter (64-bit logical-address hash) */
	ib->filter[0] = 0;
	ib->filter[1] = 0;
	PRINT_RINGS();
}
227*4882a593Smuzhiyun 
228*4882a593Smuzhiyun /* LANCE must be STOPped before we do this, too... */
static int init_restart_lance(struct lance_private *lp)
{
	int i;

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_INIT);	/* start the init-block fetch */

	/* Need a hook here for sunlance ledma stuff */

	/* Wait for the lance to complete initialization: bounded busy-poll
	 * for IDON (init done) or ERR in csr0.
	 */
	for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++)
		barrier();
	if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) {
		printk("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp));
		return -1;
	}

	/* Clear IDON by writing a "1", enable interrupts and start lance */
	WRITERDP(lp, LE_C0_IDON);
	WRITERDP(lp, LE_C0_INEA | LE_C0_STRT);

	return 0;
}
252*4882a593Smuzhiyun 
/* Full chip reinit: STOP the chip, reload CSRs and rings, then restart.
 * Returns 0 on success, -1 if the chip failed to come back up.
 */
static int lance_reset(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	int status;

	/* Stop the lance — CSRs/rings may only be loaded while STOPped */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	load_csrs(lp);
	lance_init_ring(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	status = init_restart_lance(lp);
#ifdef DEBUG_DRIVER
	printk("Lance restart=%d\n", status);
#endif
	return status;
}
271*4882a593Smuzhiyun 
lance_rx(struct net_device * dev)272*4882a593Smuzhiyun static int lance_rx(struct net_device *dev)
273*4882a593Smuzhiyun {
274*4882a593Smuzhiyun 	struct lance_private *lp = netdev_priv(dev);
275*4882a593Smuzhiyun 	volatile struct lance_init_block *ib = lp->init_block;
276*4882a593Smuzhiyun 	volatile struct lance_rx_desc *rd;
277*4882a593Smuzhiyun 	unsigned char bits;
278*4882a593Smuzhiyun #ifdef TEST_HITS
279*4882a593Smuzhiyun 	int i;
280*4882a593Smuzhiyun #endif
281*4882a593Smuzhiyun 
282*4882a593Smuzhiyun #ifdef TEST_HITS
283*4882a593Smuzhiyun 	printk("[");
284*4882a593Smuzhiyun 	for (i = 0; i < RX_RING_SIZE; i++) {
285*4882a593Smuzhiyun 		if (i == lp->rx_new)
286*4882a593Smuzhiyun 			printk("%s",
287*4882a593Smuzhiyun 			       ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "_" : "X");
288*4882a593Smuzhiyun 		else
289*4882a593Smuzhiyun 			printk("%s",
290*4882a593Smuzhiyun 			      ib->brx_ring[i].rmd1_bits & LE_R1_OWN ? "." : "1");
291*4882a593Smuzhiyun 	}
292*4882a593Smuzhiyun 	printk("]");
293*4882a593Smuzhiyun #endif
294*4882a593Smuzhiyun #ifdef CONFIG_HP300
295*4882a593Smuzhiyun 	blinken_leds(0x40, 0);
296*4882a593Smuzhiyun #endif
297*4882a593Smuzhiyun 	WRITERDP(lp, LE_C0_RINT | LE_C0_INEA);     /* ack Rx int, reenable ints */
298*4882a593Smuzhiyun 	for (rd = &ib->brx_ring[lp->rx_new];     /* For each Rx ring we own... */
299*4882a593Smuzhiyun 	     !((bits = rd->rmd1_bits) & LE_R1_OWN);
300*4882a593Smuzhiyun 	     rd = &ib->brx_ring[lp->rx_new]) {
301*4882a593Smuzhiyun 
302*4882a593Smuzhiyun 		/* We got an incomplete frame? */
303*4882a593Smuzhiyun 		if ((bits & LE_R1_POK) != LE_R1_POK) {
304*4882a593Smuzhiyun 			dev->stats.rx_over_errors++;
305*4882a593Smuzhiyun 			dev->stats.rx_errors++;
306*4882a593Smuzhiyun 			continue;
307*4882a593Smuzhiyun 		} else if (bits & LE_R1_ERR) {
308*4882a593Smuzhiyun 			/* Count only the end frame as a rx error,
309*4882a593Smuzhiyun 			 * not the beginning
310*4882a593Smuzhiyun 			 */
311*4882a593Smuzhiyun 			if (bits & LE_R1_BUF)
312*4882a593Smuzhiyun 				dev->stats.rx_fifo_errors++;
313*4882a593Smuzhiyun 			if (bits & LE_R1_CRC)
314*4882a593Smuzhiyun 				dev->stats.rx_crc_errors++;
315*4882a593Smuzhiyun 			if (bits & LE_R1_OFL)
316*4882a593Smuzhiyun 				dev->stats.rx_over_errors++;
317*4882a593Smuzhiyun 			if (bits & LE_R1_FRA)
318*4882a593Smuzhiyun 				dev->stats.rx_frame_errors++;
319*4882a593Smuzhiyun 			if (bits & LE_R1_EOP)
320*4882a593Smuzhiyun 				dev->stats.rx_errors++;
321*4882a593Smuzhiyun 		} else {
322*4882a593Smuzhiyun 			int len = (rd->mblength & 0xfff) - 4;
323*4882a593Smuzhiyun 			struct sk_buff *skb = netdev_alloc_skb(dev, len + 2);
324*4882a593Smuzhiyun 
325*4882a593Smuzhiyun 			if (!skb) {
326*4882a593Smuzhiyun 				dev->stats.rx_dropped++;
327*4882a593Smuzhiyun 				rd->mblength = 0;
328*4882a593Smuzhiyun 				rd->rmd1_bits = LE_R1_OWN;
329*4882a593Smuzhiyun 				lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
330*4882a593Smuzhiyun 				return 0;
331*4882a593Smuzhiyun 			}
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 			skb_reserve(skb, 2);           /* 16 byte align */
334*4882a593Smuzhiyun 			skb_put(skb, len);             /* make room */
335*4882a593Smuzhiyun 			skb_copy_to_linear_data(skb,
336*4882a593Smuzhiyun 					 (unsigned char *)&(ib->rx_buf[lp->rx_new][0]),
337*4882a593Smuzhiyun 					 len);
338*4882a593Smuzhiyun 			skb->protocol = eth_type_trans(skb, dev);
339*4882a593Smuzhiyun 			netif_rx(skb);
340*4882a593Smuzhiyun 			dev->stats.rx_packets++;
341*4882a593Smuzhiyun 			dev->stats.rx_bytes += len;
342*4882a593Smuzhiyun 		}
343*4882a593Smuzhiyun 
344*4882a593Smuzhiyun 		/* Return the packet to the pool */
345*4882a593Smuzhiyun 		rd->mblength = 0;
346*4882a593Smuzhiyun 		rd->rmd1_bits = LE_R1_OWN;
347*4882a593Smuzhiyun 		lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask;
348*4882a593Smuzhiyun 	}
349*4882a593Smuzhiyun 	return 0;
350*4882a593Smuzhiyun }
351*4882a593Smuzhiyun 
/* Transmit-completion path: reap descriptors the chip has finished with,
 * updating stats; on fatal transmit errors, restart the whole chip.
 * Always returns 0.
 */
static int lance_tx(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile struct lance_tx_desc *td;
	int i, j;
	int status;

#ifdef CONFIG_HP300
	blinken_leds(0x80, 0);
#endif
	/* csr0 is 2f3 */
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);	/* ack Tx int, reenable ints */
	/* csr0 is 73 */

	/* Walk from the oldest outstanding descriptor toward tx_new */
	j = lp->tx_old;
	for (i = j; i != lp->tx_new; i = j) {
		td = &ib->btx_ring[i];

		/* If we hit a packet not owned by us, stop */
		if (td->tmd1_bits & LE_T1_OWN)
			break;

		if (td->tmd1_bits & LE_T1_ERR) {
			status = td->misc;

			dev->stats.tx_errors++;
			if (status & LE_T3_RTY)
				dev->stats.tx_aborted_errors++;
			if (status & LE_T3_LCOL)
				dev->stats.tx_window_errors++;

			if (status & LE_T3_CLOS) {
				dev->stats.tx_carrier_errors++;
				/* Carrier lost: optionally flip between the
				 * twisted-pair and AUI ports and restart.
				 */
				if (lp->auto_select) {
					lp->tpe = 1 - lp->tpe;
					printk("%s: Carrier Lost, trying %s\n",
					       dev->name,
					       lp->tpe ? "TPE" : "AUI");
					/* Stop the lance */
					WRITERAP(lp, LE_CSR0);
					WRITERDP(lp, LE_C0_STOP);
					lance_init_ring(dev);
					load_csrs(lp);
					init_restart_lance(lp);
					return 0;
				}
			}

			/* buffer errors and underflows turn off the transmitter */
			/* Restart the adapter */
			if (status & (LE_T3_BUF|LE_T3_UFL)) {
				dev->stats.tx_fifo_errors++;

				printk("%s: Tx: ERR_BUF|ERR_UFL, restarting\n",
				       dev->name);
				/* Stop the lance */
				WRITERAP(lp, LE_CSR0);
				WRITERDP(lp, LE_C0_STOP);
				lance_init_ring(dev);
				load_csrs(lp);
				init_restart_lance(lp);
				return 0;
			}
		} else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) {
			/*
			 * So we don't count the packet more than once.
			 */
			td->tmd1_bits &= ~(LE_T1_POK);

			/* One collision before packet was sent. */
			if (td->tmd1_bits & LE_T1_EONE)
				dev->stats.collisions++;

			/* More than one collision, be optimistic. */
			if (td->tmd1_bits & LE_T1_EMORE)
				dev->stats.collisions += 2;

			dev->stats.tx_packets++;
		}

		j = (j + 1) & lp->tx_ring_mod_mask;
	}
	lp->tx_old = j;
	/* Ack any Tx interrupt raised while we were reaping */
	WRITERDP(lp, LE_C0_TINT | LE_C0_INEA);
	return 0;
}
439*4882a593Smuzhiyun 
/* Interrupt handler: dispatch Rx/Tx work, log error conditions, and wake
 * the transmit queue once ring space is available again.  Runs under
 * lp->devlock; returns IRQ_NONE if the LANCE did not raise the line
 * (the IRQ may be shared).
 */
static irqreturn_t
lance_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct lance_private *lp = netdev_priv(dev);
	int csr0;

	spin_lock(&lp->devlock);

	WRITERAP(lp, LE_CSR0);              /* LANCE Controller Status */
	csr0 = READRDP(lp);

	PRINT_RINGS();

	if (!(csr0 & LE_C0_INTR)) {     /* Check if any interrupt has */
		spin_unlock(&lp->devlock);
		return IRQ_NONE;        /* been generated by the Lance. */
	}

	/* Acknowledge all the interrupt sources ASAP; mask off the
	 * write-1-to-trigger control bits so we don't re-issue commands.
	 */
	WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT));

	if ((csr0 & LE_C0_ERR)) {
		/* Clear the error condition */
		WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA);
	}

	if (csr0 & LE_C0_RINT)
		lance_rx(dev);

	if (csr0 & LE_C0_TINT)
		lance_tx(dev);

	/* Log misc errors. */
	if (csr0 & LE_C0_BABL)
		dev->stats.tx_errors++;       /* Tx babble. */
	if (csr0 & LE_C0_MISS)
		dev->stats.rx_errors++;       /* Missed a Rx frame. */
	if (csr0 & LE_C0_MERR) {
		printk("%s: Bus master arbitration failure, status %4.4x.\n",
		       dev->name, csr0);
		/* Restart the chip. */
		WRITERDP(lp, LE_C0_STRT);
	}

	/* Restart the queue if lance_start_xmit() stopped it and the
	 * completion pass above freed at least one descriptor.
	 */
	if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) {
		lp->tx_full = 0;
		netif_wake_queue(dev);
	}

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA);

	spin_unlock(&lp->devlock);
	return IRQ_HANDLED;
}
496*4882a593Smuzhiyun 
lance_open(struct net_device * dev)497*4882a593Smuzhiyun int lance_open(struct net_device *dev)
498*4882a593Smuzhiyun {
499*4882a593Smuzhiyun 	struct lance_private *lp = netdev_priv(dev);
500*4882a593Smuzhiyun 	int res;
501*4882a593Smuzhiyun 
502*4882a593Smuzhiyun 	/* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
503*4882a593Smuzhiyun 	if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev))
504*4882a593Smuzhiyun 		return -EAGAIN;
505*4882a593Smuzhiyun 
506*4882a593Smuzhiyun 	res = lance_reset(dev);
507*4882a593Smuzhiyun 	spin_lock_init(&lp->devlock);
508*4882a593Smuzhiyun 	netif_start_queue(dev);
509*4882a593Smuzhiyun 
510*4882a593Smuzhiyun 	return res;
511*4882a593Smuzhiyun }
512*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(lance_open);
513*4882a593Smuzhiyun 
/* ndo_stop: quiesce the queue, STOP the chip, then release the IRQ.
 * Order matters: no new transmits, then no new interrupts from the chip,
 * then the handler can be unhooked.  Always returns 0.
 */
int lance_close(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	netif_stop_queue(dev);

	/* Stop the LANCE */
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);

	free_irq(lp->irq, dev);

	return 0;
}
EXPORT_SYMBOL_GPL(lance_close);
529*4882a593Smuzhiyun 
/* ndo_tx_timeout: the stack detected a stalled Tx queue — reinitialize
 * the chip and restart the queue.  (netif_trans_update() here is
 * redundant with the call inside lance_reset(), but harmless.)
 */
void lance_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	printk("lance_tx_timeout\n");
	lance_reset(dev);
	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_tx_timeout);
538*4882a593Smuzhiyun 
/* ndo_start_xmit: copy the skb into the next Tx ring buffer, hand the
 * descriptor to the chip and kick transmission.  Always returns
 * NETDEV_TX_OK — even when the ring is full, in which case the packet
 * is silently dropped (NOTE(review): returning NETDEV_TX_BUSY would let
 * the stack requeue instead; confirm before changing long-standing
 * behavior).
 */
netdev_tx_t lance_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int entry, skblen, len;
	static int outs;	/* NOTE(review): write-only debug counter */
	unsigned long flags;

	netif_stop_queue(dev);

	/* NOTE(review): this ring-space check runs outside lp->devlock,
	 * unlike the re-check at the bottom — confirm the race window with
	 * the interrupt-side reaper is acceptable.
	 */
	if (!TX_BUFFS_AVAIL) {
		dev_consume_skb_any(skb);
		return NETDEV_TX_OK;
	}

	skblen = skb->len;

#ifdef DEBUG_DRIVER
	/* dump the packet */
	{
		int i;

		for (i = 0; i < 64; i++) {
			if ((i % 16) == 0)
				printk("\n");
			printk("%2.2x ", skb->data[i]);
		}
	}
#endif
	/* Pad short frames to the Ethernet minimum */
	len = (skblen <= ETH_ZLEN) ? ETH_ZLEN : skblen;
	entry = lp->tx_new & lp->tx_ring_mod_mask;
	/* Length is two's complement; 0xf000 are must-be-one bits */
	ib->btx_ring[entry].length = (-len) | 0xf000;
	ib->btx_ring[entry].misc = 0;

	/* Zero the pad area so we don't leak stale buffer contents */
	if (skb->len < ETH_ZLEN)
		memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN);
	skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen);

	/* Now, give the packet to the lance */
	ib->btx_ring[entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN);
	lp->tx_new = (lp->tx_new + 1) & lp->tx_ring_mod_mask;

	outs++;
	/* Kick the lance: transmit now */
	WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD);
	dev_consume_skb_any(skb);

	/* Re-check ring space under the lock; if full, leave the queue
	 * stopped and let lance_interrupt() wake it after reaping.
	 */
	spin_lock_irqsave(&lp->devlock, flags);
	if (TX_BUFFS_AVAIL)
		netif_start_queue(dev);
	else
		lp->tx_full = 1;
	spin_unlock_irqrestore(&lp->devlock, flags);

	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(lance_start_xmit);
596*4882a593Smuzhiyun 
597*4882a593Smuzhiyun /* taken from the depca driver via a2065.c */
/* Program the 64-bit multicast hash filter in the init block from the
 * device's multicast list (taken from the depca driver via a2065.c).
 * Caller must have the chip STOPped; the init block is only re-read by
 * the LANCE on the next init.
 */
static void lance_load_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	volatile u16 *mcast_table = (u16 *)&ib->filter;
	struct netdev_hw_addr *ha;
	u32 crc;

	/* set all multicast bits */
	if (dev->flags & IFF_ALLMULTI) {
		ib->filter[0] = 0xffffffff;
		ib->filter[1] = 0xffffffff;
		return;
	}
	/* clear the multicast filter */
	ib->filter[0] = 0;
	ib->filter[1] = 0;

	/* Add addresses: the top 6 bits of the little-endian CRC of each
	 * address select one of the 64 filter bits (4 x 16-bit words).
	 */
	netdev_for_each_mc_addr(ha, dev) {
		crc = ether_crc_le(6, ha->addr);
		crc = crc >> 26;
		mcast_table[crc >> 4] |= 1 << (crc & 0xf);
	}
}
623*4882a593Smuzhiyun 
624*4882a593Smuzhiyun 
/* ndo_set_rx_mode: reprogram promiscuous mode / multicast filter.
 * Requires a full chip stop + reinit because the LANCE only reads the
 * init block during initialization.
 */
void lance_set_multicast(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);
	volatile struct lance_init_block *ib = lp->init_block;
	int stopped;

	stopped = netif_queue_stopped(dev);
	if (!stopped)
		netif_stop_queue(dev);

	/* Drain outstanding transmits before stopping the chip.
	 * NOTE(review): this is an unbounded busy-wait calling schedule();
	 * if the interrupt-side reaper never advances tx_old this spins
	 * forever — confirm acceptable for these platforms.
	 */
	while (lp->tx_old != lp->tx_new)
		schedule();

	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STOP);
	lance_init_ring(dev);

	/* lance_init_ring() left LE_MO_PROM set; keep it only for
	 * IFF_PROMISC, otherwise clear it and load the hash filter.
	 */
	if (dev->flags & IFF_PROMISC) {
		ib->mode |= LE_MO_PROM;
	} else {
		ib->mode &= ~LE_MO_PROM;
		lance_load_multicast(dev);
	}
	load_csrs(lp);
	init_restart_lance(lp);

	if (!stopped)
		netif_start_queue(dev);
}
EXPORT_SYMBOL_GPL(lance_set_multicast);
655*4882a593Smuzhiyun 
656*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller (netconsole/netpoll): nudge the chip and then run
 * the interrupt handler directly with interrupts disabled by the caller.
 */
void lance_poll(struct net_device *dev)
{
	struct lance_private *lp = netdev_priv(dev);

	spin_lock(&lp->devlock);
	WRITERAP(lp, LE_CSR0);
	WRITERDP(lp, LE_C0_STRT);
	spin_unlock(&lp->devlock);
	/* lance_interrupt() takes devlock itself, so drop it first */
	lance_interrupt(dev->irq, dev);
}
EXPORT_SYMBOL_GPL(lance_poll);
668*4882a593Smuzhiyun #endif
669*4882a593Smuzhiyun 
670*4882a593Smuzhiyun MODULE_LICENSE("GPL");
671