// SPDX-License-Identifier: GPL-2.0-only
/*
 * sgiseeq.c: Seeq8003 ethernet driver for SGI machines.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 */

#undef DEBUG

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

#include <asm/sgi/hpc3.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/seeq.h>

#include "sgiseeq.h"

static char *sgiseeqstr = "SGI Seeq8003";

/*
 * If you want speed, you do something silly, it always has worked for me.  So,
 * with that in mind, I've decided to make this driver look completely like a
 * stupid Lance from a driver architecture perspective.  Only difference is that
 * here our "ring buffer" looks and acts like a real Lance one does but is
 * laid out like how the HPC DMA and the Seeq want it to.  You'd be surprised
 * how a stupid idea like this can pay off in performance, not to mention
 * making this driver 2,000 times easier to write. ;-)
 */

/* Tune these if we tend to run out often etc. */
#define SEEQ_RX_BUFFERS  16
#define SEEQ_TX_BUFFERS  16

#define PKT_BUF_SZ       1584

#define NEXT_RX(i)  (((i) + 1) & (SEEQ_RX_BUFFERS - 1))
#define NEXT_TX(i)  (((i) + 1) & (SEEQ_TX_BUFFERS - 1))
#define PREV_RX(i)  (((i) - 1) & (SEEQ_RX_BUFFERS - 1))
#define PREV_TX(i)  (((i) - 1) & (SEEQ_TX_BUFFERS - 1))
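
/*
 * The wrap macros above mask with (count - 1), so SEEQ_RX_BUFFERS and
 * SEEQ_TX_BUFFERS must remain powers of two.
 */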

#define TX_BUFFS_AVAIL(sp) ((sp->tx_old <= sp->tx_new) ? \
			    sp->tx_old + (SEEQ_TX_BUFFERS - 1) - sp->tx_new : \
			    sp->tx_old - sp->tx_new - 1)
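
/*
 * Example: with SEEQ_TX_BUFFERS == 16 and tx_old == tx_new the ring is
 * empty and 15 slots are reported free; one slot is always left unused
 * so a completely full ring can be told apart from an empty one.
 */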

#define VIRT_TO_DMA(sp, v) ((sp)->srings_dma +                                 \
				  (dma_addr_t)((unsigned long)(v) -            \
					       (unsigned long)((sp)->rx_desc)))
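
/*
 * VIRT_TO_DMA converts a descriptor's kernel virtual address into its bus
 * address by adding its byte offset from the start of the descriptor block
 * (rx_desc points at the first descriptor of the block) to srings_dma.
 */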

/* Copy frames shorter than rx_copybreak, otherwise pass on up in
 * a full sized sk_buff.  Value of 100 stolen from tulip.c (!alpha).
 */
static int rx_copybreak = 100;

#define PAD_SIZE    (128 - sizeof(struct hpc_dma_desc) - sizeof(void *))
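
/*
 * The padding rounds each descriptor (DMA descriptor plus skb pointer) up
 * to a 128 byte slot, presumably matching the cache/DMA-sync granularity of
 * the target machines so that syncing one descriptor never touches its
 * neighbours.
 */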

struct sgiseeq_rx_desc {
	volatile struct hpc_dma_desc rdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

struct sgiseeq_tx_desc {
	volatile struct hpc_dma_desc tdma;
	u8 padding[PAD_SIZE];
	struct sk_buff *skb;
};

/*
 * Warning: This structure is laid out in a certain way because HPC dma
 *          descriptors must be 8-byte aligned.  So don't touch this without
 *          some care.
 */
struct sgiseeq_init_block { /* Note the name ;-) */
	struct sgiseeq_rx_desc rxvector[SEEQ_RX_BUFFERS];
	struct sgiseeq_tx_desc txvector[SEEQ_TX_BUFFERS];
};

struct sgiseeq_private {
	struct sgiseeq_init_block *srings;
	dma_addr_t srings_dma;

	/* Ptrs to the descriptors in uncached space. */
	struct sgiseeq_rx_desc *rx_desc;
	struct sgiseeq_tx_desc *tx_desc;

	char *name;
	struct hpc3_ethregs *hregs;
	struct sgiseeq_regs *sregs;

	/* Ring entry counters. */
	unsigned int rx_new, tx_new;
	unsigned int rx_old, tx_old;

	int is_edlc;
	unsigned char control;
	unsigned char mode;

	spinlock_t tx_lock;
};
static inline void dma_sync_desc_cpu(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);

	dma_sync_single_for_cpu(dev->dev.parent, VIRT_TO_DMA(sp, addr),
			sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}

static inline void dma_sync_desc_dev(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);

	dma_sync_single_for_device(dev->dev.parent, VIRT_TO_DMA(sp, addr),
			sizeof(struct sgiseeq_rx_desc), DMA_BIDIRECTIONAL);
}
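
/*
 * Both helpers sync sizeof(struct sgiseeq_rx_desc) bytes even when handed a
 * tx descriptor; that is harmless because the rx and tx descriptor
 * structures have identical size and layout.
 */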

static inline void hpc3_eth_reset(struct hpc3_ethregs *hregs)
{
	hregs->reset = HPC3_ERST_CRESET | HPC3_ERST_CLRIRQ;
	udelay(20);
	hregs->reset = 0;
}

static inline void reset_hpc3_and_seeq(struct hpc3_ethregs *hregs,
				       struct sgiseeq_regs *sregs)
{
	hregs->rx_ctrl = hregs->tx_ctrl = 0;
	hpc3_eth_reset(hregs);
}

#define RSTAT_GO_BITS (SEEQ_RCMD_IGOOD | SEEQ_RCMD_IEOF | SEEQ_RCMD_ISHORT | \
		       SEEQ_RCMD_IDRIB | SEEQ_RCMD_ICRC)

static inline void seeq_go(struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs,
			   struct sgiseeq_regs *sregs)
{
	sregs->rstat = sp->mode | RSTAT_GO_BITS;
	hregs->rx_ctrl = HPC3_ERXCTRL_ACTIVE;
}

static inline void __sgiseeq_set_mac_address(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int i;

	sregs->tstat = SEEQ_TCMD_RB0;
	for (i = 0; i < 6; i++)
		sregs->rw.eth_addr[i] = dev->dev_addr[i];
}

static int sgiseeq_set_mac_address(struct net_device *dev, void *addr)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sockaddr *sa = addr;

	memcpy(dev->dev_addr, sa->sa_data, dev->addr_len);

	spin_lock_irq(&sp->tx_lock);
	__sgiseeq_set_mac_address(dev);
	spin_unlock_irq(&sp->tx_lock);

	return 0;
}

#define TCNTINFO_INIT (HPCDMA_EOX | HPCDMA_ETXD)
#define RCNTCFG_INIT  (HPCDMA_OWN | HPCDMA_EORP | HPCDMA_XIE)
#define RCNTINFO_INIT (RCNTCFG_INIT | (PKT_BUF_SZ & HPCDMA_BCNT))
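
/*
 * RCNTINFO_INIT hands a receive descriptor back to the HPC: HPCDMA_OWN marks
 * it hardware-owned, HPCDMA_XIE (judging by the name) asks for an interrupt
 * on completion, and the low HPCDMA_BCNT bits carry the full PKT_BUF_SZ byte
 * count.
 */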

static int seeq_init_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	netif_stop_queue(dev);
	sp->rx_new = sp->tx_new = 0;
	sp->rx_old = sp->tx_old = 0;

	__sgiseeq_set_mac_address(dev);

	/* Setup tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		sp->tx_desc[i].tdma.cntinfo = TCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->tx_desc[i]);
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (!sp->rx_desc[i].skb) {
			dma_addr_t dma_addr;
			struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ);

			if (skb == NULL)
				return -ENOMEM;
			skb_reserve(skb, 2);
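			/*
			 * The two byte reserve aligns the IP header; the
			 * mapping below starts at skb->data - 2 so the chip
			 * still gets a full PKT_BUF_SZ buffer beginning at
			 * the original start of the skb data area.
			 */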
			dma_addr = dma_map_single(dev->dev.parent,
						  skb->data - 2,
						  PKT_BUF_SZ, DMA_FROM_DEVICE);
			sp->rx_desc[i].skb = skb;
			sp->rx_desc[i].rdma.pbuf = dma_addr;
		}
		sp->rx_desc[i].rdma.cntinfo = RCNTINFO_INIT;
		dma_sync_desc_dev(dev, &sp->rx_desc[i]);
	}
	sp->rx_desc[i - 1].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[i - 1]);
	return 0;
}

static void seeq_purge_ring(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i;

	/* clear tx ring. */
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		if (sp->tx_desc[i].skb) {
			dev_kfree_skb(sp->tx_desc[i].skb);
			sp->tx_desc[i].skb = NULL;
		}
	}

	/* And now the rx ring. */
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		if (sp->rx_desc[i].skb) {
			dev_kfree_skb(sp->rx_desc[i].skb);
			sp->rx_desc[i].skb = NULL;
		}
	}
}

#ifdef DEBUG
static struct sgiseeq_private *gpriv;
static struct net_device *gdev;

static void sgiseeq_dump_rings(void)
{
	static int once;
	struct sgiseeq_rx_desc *r = gpriv->rx_desc;
	struct sgiseeq_tx_desc *t = gpriv->tx_desc;
	struct hpc3_ethregs *hregs = gpriv->hregs;
	int i;

	if (once)
		return;
	once++;
	printk("RING DUMP:\n");
	for (i = 0; i < SEEQ_RX_BUFFERS; i++) {
		printk("RX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&r[i]), r[i].rdma.pbuf, r[i].rdma.cntinfo,
		       r[i].rdma.pnext);
	}
	for (i = 0; i < SEEQ_TX_BUFFERS; i++) {
		printk("TX [%d]: @(%p) [%08x,%08x,%08x] ",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
		i += 1;
		printk("-- [%d]: @(%p) [%08x,%08x,%08x]\n",
		       i, (&t[i]), t[i].tdma.pbuf, t[i].tdma.cntinfo,
		       t[i].tdma.pnext);
	}
	printk("INFO: [rx_new = %d rx_old=%d] [tx_new = %d tx_old = %d]\n",
	       gpriv->rx_new, gpriv->rx_old, gpriv->tx_new, gpriv->tx_old);
	printk("RREGS: rx_cbptr[%08x] rx_ndptr[%08x] rx_ctrl[%08x]\n",
	       hregs->rx_cbptr, hregs->rx_ndptr, hregs->rx_ctrl);
	printk("TREGS: tx_cbptr[%08x] tx_ndptr[%08x] tx_ctrl[%08x]\n",
	       hregs->tx_cbptr, hregs->tx_ndptr, hregs->tx_ctrl);
}
#endif

#define TSTAT_INIT_SEEQ (SEEQ_TCMD_IPT|SEEQ_TCMD_I16|SEEQ_TCMD_IC|SEEQ_TCMD_IUF)
#define TSTAT_INIT_EDLC ((TSTAT_INIT_SEEQ) | SEEQ_TCMD_RB2)
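
/*
 * Judging by the mnemonics, these enable transmit interrupts for packet
 * transmitted, sixteen collisions, collision and underflow; the EDLC variant
 * additionally sets SEEQ_TCMD_RB2, presumably selecting the register bank
 * used by the control/frame_gap writes in init_seeq().
 */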

static int init_seeq(struct net_device *dev, struct sgiseeq_private *sp,
		     struct sgiseeq_regs *sregs)
{
	struct hpc3_ethregs *hregs = sp->hregs;
	int err;

	reset_hpc3_and_seeq(hregs, sregs);
	err = seeq_init_ring(dev);
	if (err)
		return err;

	/* Setup to field the proper interrupt types. */
	if (sp->is_edlc) {
		sregs->tstat = TSTAT_INIT_EDLC;
		sregs->rw.wregs.control = sp->control;
		sregs->rw.wregs.frame_gap = 0;
	} else {
		sregs->tstat = TSTAT_INIT_SEEQ;
	}

	hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc);
	hregs->tx_ndptr = VIRT_TO_DMA(sp, sp->tx_desc);

	seeq_go(sp, hregs, sregs);
	return 0;
}

static void record_rx_errors(struct net_device *dev, unsigned char status)
{
	if (status & SEEQ_RSTAT_OVERF ||
	    status & SEEQ_RSTAT_SFRAME)
		dev->stats.rx_over_errors++;
	if (status & SEEQ_RSTAT_CERROR)
		dev->stats.rx_crc_errors++;
	if (status & SEEQ_RSTAT_DERROR)
		dev->stats.rx_frame_errors++;
	if (status & SEEQ_RSTAT_REOF)
		dev->stats.rx_errors++;
}

static inline void rx_maybe_restart(struct sgiseeq_private *sp,
				    struct hpc3_ethregs *hregs,
				    struct sgiseeq_regs *sregs)
{
	if (!(hregs->rx_ctrl & HPC3_ERXCTRL_ACTIVE)) {
		hregs->rx_ndptr = VIRT_TO_DMA(sp, sp->rx_desc + sp->rx_new);
		seeq_go(sp, hregs, sregs);
	}
}

static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_rx_desc *rd;
	struct sk_buff *skb = NULL;
	struct sk_buff *newskb;
	unsigned char pkt_status;
	int len = 0;
	unsigned int orig_end = PREV_RX(sp->rx_new);

	/* Service every received packet. */
	rd = &sp->rx_desc[sp->rx_new];
	dma_sync_desc_cpu(dev, rd);
	while (!(rd->rdma.cntinfo & HPCDMA_OWN)) {
		len = PKT_BUF_SZ - (rd->rdma.cntinfo & HPCDMA_BCNT) - 3;
		dma_unmap_single(dev->dev.parent, rd->rdma.pbuf,
				 PKT_BUF_SZ, DMA_FROM_DEVICE);
		pkt_status = rd->skb->data[len];
		if (pkt_status & SEEQ_RSTAT_FIG) {
			/* Packet is OK. */
			/* We don't want to receive our own packets */
			if (!ether_addr_equal(rd->skb->data + 6, dev->dev_addr)) {
				if (len > rx_copybreak) {
					skb = rd->skb;
					newskb = netdev_alloc_skb(dev, PKT_BUF_SZ);
					if (!newskb) {
						newskb = skb;
						skb = NULL;
						goto memory_squeeze;
					}
					skb_reserve(newskb, 2);
				} else {
					skb = netdev_alloc_skb_ip_align(dev, len);
					if (skb)
						skb_copy_to_linear_data(skb, rd->skb->data, len);

					newskb = rd->skb;
				}
memory_squeeze:
				if (skb) {
					skb_put(skb, len);
					skb->protocol = eth_type_trans(skb, dev);
					netif_rx(skb);
					dev->stats.rx_packets++;
					dev->stats.rx_bytes += len;
				} else {
					dev->stats.rx_dropped++;
				}
			} else {
				/* Silently drop my own packets */
				newskb = rd->skb;
			}
		} else {
			record_rx_errors(dev, pkt_status);
			newskb = rd->skb;
		}
		rd->skb = newskb;
		rd->rdma.pbuf = dma_map_single(dev->dev.parent,
					       newskb->data - 2,
					       PKT_BUF_SZ, DMA_FROM_DEVICE);

		/* Return the entry to the ring pool. */
		rd->rdma.cntinfo = RCNTINFO_INIT;
		sp->rx_new = NEXT_RX(sp->rx_new);
		dma_sync_desc_dev(dev, rd);
		rd = &sp->rx_desc[sp->rx_new];
		dma_sync_desc_cpu(dev, rd);
	}
	dma_sync_desc_dev(dev, rd);

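	/*
	 * Move the end-of-ring marker: clear HPCDMA_EOR on the descriptor
	 * that used to terminate the ring and set it on the one just behind
	 * rx_new, so the receive DMA stops after a full lap of the ring
	 * rather than wrapping onto descriptors the driver has not serviced
	 * yet.
	 */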
	dma_sync_desc_cpu(dev, &sp->rx_desc[orig_end]);
	sp->rx_desc[orig_end].rdma.cntinfo &= ~(HPCDMA_EOR);
	dma_sync_desc_dev(dev, &sp->rx_desc[orig_end]);
	dma_sync_desc_cpu(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	sp->rx_desc[PREV_RX(sp->rx_new)].rdma.cntinfo |= HPCDMA_EOR;
	dma_sync_desc_dev(dev, &sp->rx_desc[PREV_RX(sp->rx_new)]);
	rx_maybe_restart(sp, hregs, sregs);
}

static inline void tx_maybe_reset_collisions(struct sgiseeq_private *sp,
					     struct sgiseeq_regs *sregs)
{
	if (sp->is_edlc) {
		sregs->rw.wregs.control = sp->control & ~(SEEQ_CTRL_XCNT);
		sregs->rw.wregs.control = sp->control;
	}
}

static inline void kick_tx(struct net_device *dev,
			   struct sgiseeq_private *sp,
			   struct hpc3_ethregs *hregs)
{
	struct sgiseeq_tx_desc *td;
	int i = sp->tx_old;

	/* If the HPC aint doin nothin, and there are more packets
	 * with ETXD cleared and XIU set we must make very certain
	 * that we restart the HPC else we risk locking up the
	 * adapter.  The following code is only safe iff the HPCDMA
	 * is not active!
	 */
	td = &sp->tx_desc[i];
	dma_sync_desc_cpu(dev, td);
	while ((td->tdma.cntinfo & (HPCDMA_XIU | HPCDMA_ETXD)) ==
	      (HPCDMA_XIU | HPCDMA_ETXD)) {
		i = NEXT_TX(i);
		td = &sp->tx_desc[i];
		dma_sync_desc_cpu(dev, td);
	}
	if (td->tdma.cntinfo & HPCDMA_XIU) {
		dma_sync_desc_dev(dev, td);
		hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
		hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
	}
}

static inline void sgiseeq_tx(struct net_device *dev, struct sgiseeq_private *sp,
			      struct hpc3_ethregs *hregs,
			      struct sgiseeq_regs *sregs)
{
	struct sgiseeq_tx_desc *td;
	unsigned long status = hregs->tx_ctrl;
	int j;

	tx_maybe_reset_collisions(sp, sregs);

	if (!(status & (HPC3_ETXCTRL_ACTIVE | SEEQ_TSTAT_PTRANS))) {
		/* Oops, HPC detected some sort of error. */
		if (status & SEEQ_TSTAT_R16)
			dev->stats.tx_aborted_errors++;
		if (status & SEEQ_TSTAT_UFLOW)
			dev->stats.tx_fifo_errors++;
		if (status & SEEQ_TSTAT_LCLS)
			dev->stats.collisions++;
	}

	/* Ack 'em... */
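	/*
	 * Walk the ring from tx_old: stop at the first descriptor never
	 * handed to the hardware (XIU clear); if we hit one that was handed
	 * over but not yet transmitted (ETXD clear), restart the DMA engine
	 * if it has gone idle and stop; everything before that is finished
	 * and can be reclaimed.
	 */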
	for (j = sp->tx_old; j != sp->tx_new; j = NEXT_TX(j)) {
		td = &sp->tx_desc[j];

		dma_sync_desc_cpu(dev, td);
		if (!(td->tdma.cntinfo & (HPCDMA_XIU)))
			break;
		if (!(td->tdma.cntinfo & (HPCDMA_ETXD))) {
			dma_sync_desc_dev(dev, td);
			if (!(status & HPC3_ETXCTRL_ACTIVE)) {
				hregs->tx_ndptr = VIRT_TO_DMA(sp, td);
				hregs->tx_ctrl = HPC3_ETXCTRL_ACTIVE;
			}
			break;
		}
		dev->stats.tx_packets++;
		sp->tx_old = NEXT_TX(sp->tx_old);
		td->tdma.cntinfo &= ~(HPCDMA_XIU | HPCDMA_XIE);
		td->tdma.cntinfo |= HPCDMA_EOX;
		if (td->skb) {
			dev_kfree_skb_any(td->skb);
			td->skb = NULL;
		}
		dma_sync_desc_dev(dev, td);
	}
}

static irqreturn_t sgiseeq_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	struct sgiseeq_regs *sregs = sp->sregs;

	spin_lock(&sp->tx_lock);

	/* Ack the IRQ and set software state. */
	hregs->reset = HPC3_ERST_CLRIRQ;

	/* Always check for received packets. */
	sgiseeq_rx(dev, sp, hregs, sregs);

	/* Only check for tx acks if we have something queued. */
	if (sp->tx_old != sp->tx_new)
		sgiseeq_tx(dev, sp, hregs, sregs);

	if ((TX_BUFFS_AVAIL(sp) > 0) && netif_queue_stopped(dev)) {
		netif_wake_queue(dev);
	}
	spin_unlock(&sp->tx_lock);

	return IRQ_HANDLED;
}

static int sgiseeq_open(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;
	int err;

	if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
		printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
		return -EAGAIN;
	}

	err = init_seeq(dev, sp, sregs);
	if (err)
		goto out_free_irq;

	netif_start_queue(dev);

	return 0;

out_free_irq:
	free_irq(irq, dev);

	return err;
}

static int sgiseeq_close(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	unsigned int irq = dev->irq;

	netif_stop_queue(dev);

	/* Shutdown the Seeq. */
	reset_hpc3_and_seeq(sp->hregs, sregs);
	free_irq(irq, dev);
	seeq_purge_ring(dev);

	return 0;
}

static inline int sgiseeq_reset(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int err;

	err = init_seeq(dev, sp, sregs);
	if (err)
		return err;

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);

	return 0;
}

static netdev_tx_t
sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int len, entry;

	spin_lock_irqsave(&sp->tx_lock, flags);

	/* Setup... */
	len = skb->len;
	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN)) {
			spin_unlock_irqrestore(&sp->tx_lock, flags);
			return NETDEV_TX_OK;
		}
		len = ETH_ZLEN;
	}

	dev->stats.tx_bytes += len;
	entry = sp->tx_new;
	td = &sp->tx_desc[entry];
	dma_sync_desc_cpu(dev, td);

	/* Create entry.  There are so many races with adding a new
	 * descriptor to the chain:
	 * 1) Assume that the HPC is off processing a DMA chain while
	 *    we are changing all of the following.
	 * 2) Do not allow the HPC to look at a new descriptor until
	 *    we have completely set up its state.  This means, do
	 *    not clear HPCDMA_EOX in the current last descriptor
	 *    until the one we are adding looks consistent and could
	 *    be processed right now.
	 * 3) The tx interrupt code must notice when we've added a new
	 *    entry and the HPC got to the end of the chain before we
	 *    added this new entry and restarted it.
	 */
	td->skb = skb;
	td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
				       len, DMA_TO_DEVICE);
	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
	                   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
	dma_sync_desc_dev(dev, td);
	if (sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
		dma_sync_desc_cpu(dev, backend);
		backend->tdma.cntinfo &= ~HPCDMA_EOX;
		dma_sync_desc_dev(dev, backend);
	}
	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

	/* Maybe kick the HPC back into motion. */
	if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(dev, sp, hregs);

	if (!TX_BUFFS_AVAIL(sp))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return NETDEV_TX_OK;
}

static void timeout(struct net_device *dev, unsigned int txqueue)
{
	printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
	sgiseeq_reset(dev);

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}

static void sgiseeq_set_multicast(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	unsigned char oldmode = sp->mode;

	if (dev->flags & IFF_PROMISC)
		sp->mode = SEEQ_RCMD_RANY;
	else if ((dev->flags & IFF_ALLMULTI) || !netdev_mc_empty(dev))
		sp->mode = SEEQ_RCMD_RBMCAST;
	else
		sp->mode = SEEQ_RCMD_RBCAST;

	/* XXX I know this sucks, but is there a better way to reprogram
	 * XXX the receiver? At least, this shouldn't happen too often.
	 */

	if (oldmode != sp->mode)
		sgiseeq_reset(dev);
}

static inline void setup_tx_ring(struct net_device *dev,
				 struct sgiseeq_tx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].tdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].tdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}

static inline void setup_rx_ring(struct net_device *dev,
				 struct sgiseeq_rx_desc *buf,
				 int nbufs)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	int i = 0;

	while (i < (nbufs - 1)) {
		buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf + i + 1);
		buf[i].rdma.pbuf = 0;
		dma_sync_desc_dev(dev, &buf[i]);
		i++;
	}
	buf[i].rdma.pbuf = 0;
	buf[i].rdma.pnext = VIRT_TO_DMA(sp, buf);
	dma_sync_desc_dev(dev, &buf[i]);
}
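
/*
 * Both rings are linked into a circle once at probe time: each descriptor's
 * pnext points at the next slot and the last points back at the first, so
 * only the cntinfo and pbuf fields need touching at runtime.
 */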

static const struct net_device_ops sgiseeq_netdev_ops = {
	.ndo_open		= sgiseeq_open,
	.ndo_stop		= sgiseeq_close,
	.ndo_start_xmit		= sgiseeq_start_xmit,
	.ndo_tx_timeout		= timeout,
	.ndo_set_rx_mode	= sgiseeq_set_multicast,
	.ndo_set_mac_address	= sgiseeq_set_mac_address,
	.ndo_validate_addr	= eth_validate_addr,
};

static int sgiseeq_probe(struct platform_device *pdev)
{
	struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev);
	struct hpc3_regs *hpcregs = pd->hpc;
	struct sgiseeq_init_block *sr;
	unsigned int irq = pd->irq;
	struct sgiseeq_private *sp;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof(struct sgiseeq_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}

	platform_set_drvdata(pdev, dev);
	SET_NETDEV_DEV(dev, &pdev->dev);
	sp = netdev_priv(dev);

	/* Make private data page aligned */
	sr = dma_alloc_noncoherent(&pdev->dev, sizeof(*sp->srings),
			&sp->srings_dma, DMA_BIDIRECTIONAL, GFP_KERNEL);
	if (!sr) {
		printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	sp->srings = sr;
	sp->rx_desc = sp->srings->rxvector;
	sp->tx_desc = sp->srings->txvector;
	spin_lock_init(&sp->tx_lock);

	/* A couple calculations now, saves many cycles later. */
	setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
	setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);

	memcpy(dev->dev_addr, pd->mac, ETH_ALEN);

#ifdef DEBUG
	gpriv = sp;
	gdev = dev;
#endif
	sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
	sp->hregs = &hpcregs->ethregs;
	sp->name = sgiseeqstr;
	sp->mode = SEEQ_RCMD_RBCAST;

	/* Setup PIO and DMA transfer timing */
	sp->hregs->pconfig = 0x161;
	sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
			     HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;
	/* Reset the chip. */
	hpc3_eth_reset(sp->hregs);

	sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
	if (sp->is_edlc)
		sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
			      SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
			      SEEQ_CTRL_ENCARR;

	dev->netdev_ops		= &sgiseeq_netdev_ops;
	dev->watchdog_timeo	= (200 * HZ) / 1000;
	dev->irq		= irq;

	if (register_netdev(dev)) {
		printk(KERN_ERR "Sgiseeq: Cannot register net device, "
		       "aborting.\n");
		err = -ENODEV;
		goto err_out_free_attrs;
	}

	printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);

	return 0;

err_out_free_attrs:
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
		       sp->srings_dma, DMA_BIDIRECTIONAL);
err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}

static int sgiseeq_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sgiseeq_private *sp = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_noncoherent(&pdev->dev, sizeof(*sp->srings), sp->srings,
		       sp->srings_dma, DMA_BIDIRECTIONAL);
	free_netdev(dev);

	return 0;
}

static struct platform_driver sgiseeq_driver = {
	.probe	= sgiseeq_probe,
	.remove	= sgiseeq_remove,
	.driver = {
		.name	= "sgiseeq",
	}
};

module_platform_driver(sgiseeq_driver);

MODULE_DESCRIPTION("SGI Seeq 8003 driver");
MODULE_AUTHOR("Linux/MIPS Mailing List <linux-mips@linux-mips.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:sgiseeq");