// SPDX-License-Identifier: GPL-2.0
/* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
 *          Once again I am out to prove that every ethernet
 *          controller out there can be most efficiently programmed
 *          if you make it look like a LANCE.
 *
 * Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pgtable.h>

#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/irq.h>

#include "sunqe.h"

#define DRV_NAME	"sunqe"
#define DRV_VERSION	"4.1"
#define DRV_RELDATE	"August 27, 2008"
#define DRV_AUTHOR	"David S. Miller (davem@davemloft.net)"

static char version[] =
	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");

static struct sunqec *root_qec_dev;

static void qe_set_multicast(struct net_device *dev);

#define QEC_RESET_TRIES 200

static inline int qec_global_reset(void __iomem *gregs)
{
	int tries = QEC_RESET_TRIES;

	sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
	while (--tries) {
		u32 tmp = sbus_readl(gregs + GLOB_CTRL);
		if (tmp & GLOB_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (tries)
		return 0;
	printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
	return -1;
}

#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES   200

static inline int qe_stop(struct sunqe *qep)
{
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	int tries;

	/* Reset the MACE, then the QEC channel. */
	sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
	tries = MACE_RESET_RETRIES;
	while (--tries) {
		u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
		if (tmp & MREGS_BCONFIG_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
		return -1;
	}

	sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
	tries = QE_RESET_RETRIES;
	while (--tries) {
		u32 tmp = sbus_readl(cregs + CREG_CTRL);
		if (tmp & CREG_CTRL_RESET) {
			udelay(20);
			continue;
		}
		break;
	}
	if (!tries) {
		printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
		return -1;
	}
	return 0;
}

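/* The RX/TX descriptor rings and the packet buffers they point at both
 * live in DMA-coherent memory (qep->qe_block and qep->buffers).  Each RX
 * descriptor is handed to the chip with RXD_OWN set and a length field of
 * RXD_PKT_SZ; the QEC clears RXD_OWN and fills in the real frame length
 * once a packet has landed in the corresponding rx_buf slot.
 */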
static void qe_init_rings(struct sunqe *qep)
{
	struct qe_init_block *qb = qep->qe_block;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = (__u32)qep->buffers_dvma;
	int i;

	qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
	memset(qb, 0, sizeof(struct qe_init_block));
	memset(qbufs, 0, sizeof(struct sunqe_buffers));
	for (i = 0; i < RX_RING_SIZE; i++) {
		qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
		qb->qe_rxd[i].rx_flags =
			(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
	}
}

static int qe_init(struct sunqe *qep, int from_irq)
{
	struct sunqec *qecp = qep->parent;
	void __iomem *cregs = qep->qcregs;
	void __iomem *mregs = qep->mregs;
	void __iomem *gregs = qecp->gregs;
	unsigned char *e = &qep->dev->dev_addr[0];
	__u32 qblk_dvma = (__u32)qep->qblock_dvma;
	u32 tmp;
	int i;

	/* Shut it up. */
	if (qe_stop(qep))
		return -EAGAIN;

	/* Setup initial rx/tx init block pointers. */
	sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
	sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);

	/* Enable/mask the various irq's. */
	sbus_writel(0, cregs + CREG_RIMASK);
	sbus_writel(1, cregs + CREG_TIMASK);

	sbus_writel(0, cregs + CREG_QMASK);
	sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);

	/* Setup the FIFO pointers into QEC local memory. */
	tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
	sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_RXWBUFPTR);

	tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
		sbus_readl(gregs + GLOB_RSIZE);
	sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
	sbus_writel(tmp, cregs + CREG_TXWBUFPTR);

	/* Clear the channel collision counter. */
	sbus_writel(0, cregs + CREG_CCNT);

	/* For 10baseT, neither inter-frame spacing nor throttling seems to be necessary. */
	sbus_writel(0, cregs + CREG_PIPG);

	/* Now dork with the AMD MACE. */
	sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
	sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
	sbus_writeb(0, mregs + MREGS_RXFCNTL);

	/* The QEC dma's the rx'd packets from local memory out to main memory,
	 * and therefore it interrupts when the packet reception is "complete".
	 * So don't listen for the MACE talking about it.
	 */
	sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
	sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
	sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
		     MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
		    mregs + MREGS_FCONFIG);

	/* Only usable interface on QuadEther is twisted pair. */
	sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);

	/* Tell MACE we are changing the ether address. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	sbus_writeb(e[0], mregs + MREGS_ETHADDR);
	sbus_writeb(e[1], mregs + MREGS_ETHADDR);
	sbus_writeb(e[2], mregs + MREGS_ETHADDR);
	sbus_writeb(e[3], mregs + MREGS_ETHADDR);
	sbus_writeb(e[4], mregs + MREGS_ETHADDR);
	sbus_writeb(e[5], mregs + MREGS_ETHADDR);

	/* Clear out the address filter. */
	sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
		    mregs + MREGS_IACONFIG);
	while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
		barrier();
	for (i = 0; i < 8; i++)
		sbus_writeb(0, mregs + MREGS_FILTER);

	/* Address changes are now complete. */
	sbus_writeb(0, mregs + MREGS_IACONFIG);

	qe_init_rings(qep);

	/* Wait a little bit for the link to come up... */
	mdelay(5);
	if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
		int tries = 50;

		while (--tries) {
			u8 tmp;

			mdelay(5);
			barrier();
			tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
			if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
				break;
		}
		if (tries == 0)
			printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
	}

	/* Missed packet counter is cleared on a read. */
	sbus_readb(mregs + MREGS_MPCNT);

	/* Reload multicast information, this will enable the receiver
	 * and transmitter.
	 */
	qe_set_multicast(qep->dev);

	/* QEC should now start to show interrupts. */
	return 0;
}

/* Grrr, certain error conditions completely lock up the AMD MACE,
 * so when we get these we _must_ reset the chip.
 */
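/* Note: the various CREG_STAT_*OFLOW bits report that one of the chip's
 * event counters wrapped; the "+= 256" statistics updates below assume
 * each such counter is 8 bits wide, so one overflow stands for 256 events.
 */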
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
	struct net_device *dev = qep->dev;
	int mace_hwbug_workaround = 0;

	if (qe_status & CREG_STAT_EDEFER) {
		printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
		dev->stats.tx_errors++;
	}

	if (qe_status & CREG_STAT_CLOSS) {
		printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_carrier_errors++;
	}

	if (qe_status & CREG_STAT_ERETRIES) {
		printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_LCOLL) {
		printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.collisions++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_FUFLOW) {
		printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_JERROR) {
		printk(KERN_ERR "%s: Jabber error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_BERROR) {
		printk(KERN_ERR "%s: Babble error.\n", dev->name);
	}

	if (qe_status & CREG_STAT_CCOFLOW) {
		dev->stats.tx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_TXDERROR) {
		printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXLERR) {
		printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
		dev->stats.tx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXPERR) {
		printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_TXSERR) {
		printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RCCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.collisions += 256;
	}

	if (qe_status & CREG_STAT_RUOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_over_errors += 256;
	}

	if (qe_status & CREG_STAT_MCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_missed_errors += 256;
	}

	if (qe_status & CREG_STAT_RXFOFLOW) {
		printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_over_errors++;
	}

	if (qe_status & CREG_STAT_RLCOLL) {
		printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.collisions++;
	}

	if (qe_status & CREG_STAT_FCOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_frame_errors += 256;
	}

	if (qe_status & CREG_STAT_CECOFLOW) {
		dev->stats.rx_errors += 256;
		dev->stats.rx_crc_errors += 256;
	}

	if (qe_status & CREG_STAT_RXDROP) {
		printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_dropped++;
		dev->stats.rx_missed_errors++;
	}

	if (qe_status & CREG_STAT_RXSMALL) {
		printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_length_errors++;
	}

	if (qe_status & CREG_STAT_RXLERR) {
		printk(KERN_ERR "%s: Receive late error.\n", dev->name);
		dev->stats.rx_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXPERR) {
		printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (qe_status & CREG_STAT_RXSERR) {
		printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
		dev->stats.rx_errors++;
		dev->stats.rx_missed_errors++;
		mace_hwbug_workaround = 1;
	}

	if (mace_hwbug_workaround)
		qe_init(qep, 1);
	return mace_hwbug_workaround;
}

/* Per-QE receive interrupt service routine.  Just like on the happy meal
 * we receive directly into skb's with a small packet copy water mark.
 */
static void qe_rx(struct sunqe *qep)
{
	struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
	struct net_device *dev = qep->dev;
	struct qe_rxd *this;
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 qbufs_dvma = (__u32)qep->buffers_dvma;
	int elem = qep->rx_new;
	u32 flags;

	this = &rxbase[elem];
	while (!((flags = this->rx_flags) & RXD_OWN)) {
		struct sk_buff *skb;
		unsigned char *this_qbuf =
			&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
		__u32 this_qbuf_dvma = qbufs_dvma +
			qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
		struct qe_rxd *end_rxd =
			&rxbase[(elem + RX_RING_SIZE) & (RX_RING_MAXSIZE - 1)];
		int len = (flags & RXD_LENGTH) - 4;  /* QE adds ether FCS size to len */

		/* Check for errors. */
		if (len < ETH_ZLEN) {
			dev->stats.rx_errors++;
			dev->stats.rx_length_errors++;
			dev->stats.rx_dropped++;
		} else {
			skb = netdev_alloc_skb(dev, len + 2);
			if (skb == NULL) {
				dev->stats.rx_dropped++;
			} else {
				skb_reserve(skb, 2);
				skb_put(skb, len);
				skb_copy_to_linear_data(skb, this_qbuf, len);
				skb->protocol = eth_type_trans(skb, qep->dev);
				netif_rx(skb);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += len;
			}
		}
		end_rxd->rx_addr = this_qbuf_dvma;
		end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));

		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	qep->rx_new = elem;
}

static void qe_tx_reclaim(struct sunqe *qep);

/* Interrupts for all QE's get filtered out via the QEC master controller,
 * so we just run through each qe and check to see who is signaling
 * and thus needs to be serviced.
 */
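/* GLOB_STAT packs one 4-bit status nibble per QE channel, channel 0 in
 * the low nibble, which is why the loop below tests the low four bits of
 * the latched status and then shifts it right by four for the next channel.
 */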
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
	struct sunqec *qecp = dev_id;
	u32 qec_status;
	int channel = 0;

	/* Latch the status now. */
	qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
	while (channel < 4) {
		if (qec_status & 0xf) {
			struct sunqe *qep = qecp->qes[channel];
			u32 qe_status;

			qe_status = sbus_readl(qep->qcregs + CREG_STAT);
			if (qe_status & CREG_STAT_ERRORS) {
				if (qe_is_bolixed(qep, qe_status))
					goto next;
			}
			if (qe_status & CREG_STAT_RXIRQ)
				qe_rx(qep);
			if (netif_queue_stopped(qep->dev) &&
			    (qe_status & CREG_STAT_TXIRQ)) {
				spin_lock(&qep->lock);
				qe_tx_reclaim(qep);
				if (TX_BUFFS_AVAIL(qep) > 0) {
					/* Wake net queue and return to
					 * lazy tx reclaim.
					 */
					netif_wake_queue(qep->dev);
					sbus_writel(1, qep->qcregs + CREG_TIMASK);
				}
				spin_unlock(&qep->lock);
			}
		next:
			;
		}
		qec_status >>= 4;
		channel++;
	}

	return IRQ_HANDLED;
}

static int qe_open(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qep->mconfig = (MREGS_MCONFIG_TXENAB |
			MREGS_MCONFIG_RXENAB |
			MREGS_MCONFIG_MBAENAB);
	return qe_init(qep, 0);
}

static int qe_close(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);

	qe_stop(qep);
	return 0;
}

/* Reclaim TX'd frames from the ring.  This must always run under
 * the IRQ protected qep->lock.
 */
static void qe_tx_reclaim(struct sunqe *qep)
{
	struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
	int elem = qep->tx_old;

	while (elem != qep->tx_new) {
		u32 flags = txbase[elem].tx_flags;

		if (flags & TXD_OWN)
			break;
		elem = NEXT_TX(elem);
	}
	qep->tx_old = elem;
}

static void qe_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct sunqe *qep = netdev_priv(dev);
	int tx_full;

	spin_lock_irq(&qep->lock);

	/* Try to reclaim, if that frees up some tx
	 * entries, we're fine.
	 */
	qe_tx_reclaim(qep);
	tx_full = TX_BUFFS_AVAIL(qep) <= 0;

	spin_unlock_irq(&qep->lock);

	if (!tx_full)
		goto out;

	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	qe_init(qep, 1);

out:
	netif_wake_queue(dev);
}

/* Get a packet queued to go onto the wire. */
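/* Transmit path notes: the skb data is always copied into the
 * preallocated DMA-coherent tx_buf slot for this ring entry, so the skb
 * can be freed as soon as the descriptor has been handed to the chip.
 * TX descriptors are normally reclaimed lazily (from here and from the
 * timeout handler); the TX completion interrupt is only unmasked once
 * the ring fills up and the queue has to be stopped.
 */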
static netdev_tx_t qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct sunqe_buffers *qbufs = qep->buffers;
	__u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
	unsigned char *txbuf;
	int len, entry;

	spin_lock_irq(&qep->lock);

	qe_tx_reclaim(qep);

	len = skb->len;
	entry = qep->tx_new;

	txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
	txbuf_dvma = qbufs_dvma +
		qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));

	/* Avoid a race... */
	qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;

	skb_copy_from_linear_data(skb, txbuf, len);

	qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
	qep->qe_block->qe_txd[entry].tx_flags =
		(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
	qep->tx_new = NEXT_TX(entry);

	/* Get it going. */
	sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += len;

	if (TX_BUFFS_AVAIL(qep) <= 0) {
		/* Halt the net queue and enable tx interrupts.
		 * When the tx queue empties the tx irq handler
		 * will wake up the queue and return us back to
		 * the lazy tx reclaim scheme.
		 */
		netif_stop_queue(dev);
		sbus_writel(0, qep->qcregs + CREG_TIMASK);
	}
	spin_unlock_irq(&qep->lock);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}

static void qe_set_multicast(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	u8 new_mconfig = qep->mconfig;
	int i;
	u32 crc;

	/* Lock out others. */
	netif_stop_queue(dev);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++)
			sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	} else if (dev->flags & IFF_PROMISC) {
		new_mconfig |= MREGS_MCONFIG_PROMISC;
	} else {
		u16 hash_table[4];
		u8 *hbytes = (unsigned char *) &hash_table[0];

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
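		/* The logical address filter is 64 bits wide, loaded as
		 * eight successive byte writes to MREGS_FILTER.  The top
		 * six bits of the little-endian CRC of each multicast
		 * address select which of those 64 bits gets set above.
		 */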
		/* Program the qe with the new filter value. */
		sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
			    qep->mregs + MREGS_IACONFIG);
		while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
			barrier();
		for (i = 0; i < 8; i++) {
			u8 tmp = *hbytes++;
			sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
		}
		sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
	}

	/* Any change of the logical address filter, the physical address,
	 * or enabling/disabling promiscuous mode causes the MACE to disable
	 * the receiver.  So we must re-enable them here or else the MACE
	 * refuses to listen to anything on the network.  Sheesh, took
	 * me a day or two to find this bug.
	 */
	qep->mconfig = new_mconfig;
	sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);

	/* Let us get going again. */
	netif_wake_queue(dev);
}

/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	const struct linux_prom_registers *regs;
	struct sunqe *qep = netdev_priv(dev);
	struct platform_device *op;

	strlcpy(info->driver, "sunqe", sizeof(info->driver));
	strlcpy(info->version, "3.0", sizeof(info->version));

	op = qep->op;
	regs = of_get_property(op->dev.of_node, "reg", NULL);
	if (regs)
		snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
			 regs->which_io);
}

static u32 qe_get_link(struct net_device *dev)
{
	struct sunqe *qep = netdev_priv(dev);
	void __iomem *mregs = qep->mregs;
	u8 phyconfig;

	spin_lock_irq(&qep->lock);
	phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
	spin_unlock_irq(&qep->lock);

	return phyconfig & MREGS_PHYCONFIG_LSTAT;
}

static const struct ethtool_ops qe_ethtool_ops = {
	.get_drvinfo	= qe_get_drvinfo,
	.get_link	= qe_get_link,
};

/* This is only called once at boot time for each card probed. */
static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
	u8 bsizes = qecp->qec_bursts;

	if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
		sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
	} else if (bsizes & DMA_BURST32) {
		sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
	} else {
		sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
	}

	/* Packetsize only used in 100baseT BigMAC configurations,
	 * set it to zero just to be on the safe side.
	 */
	sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);

	/* Set the local memsize register, divided up to one piece per QE channel. */
	sbus_writel((resource_size(&op->resource[1]) >> 2),
		    qecp->gregs + GLOB_MSIZE);

	/* Divide up the local QEC memory amongst the 4 QE receiver and
	 * transmitter FIFOs.  Basically it is (total / 2 / num_channels).
	 */
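	/* For example (size purely illustrative): with 64K of local QEC
	 * memory, GLOB_MSIZE becomes 16K per channel and each channel's
	 * RX and TX FIFO region (GLOB_RSIZE/GLOB_TSIZE) ends up as 8K.
	 */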
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_TSIZE);
	sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
		    qecp->gregs + GLOB_RSIZE);
}

static u8 qec_get_burst(struct device_node *dp)
{
	u8 bsizes, bsizes_more;

	/* Find and set the burst sizes for the QEC, since it
	 * does the actual dma for all 4 channels.
	 */
	bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
	bsizes &= 0xff;
	bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);

	if (bsizes_more != 0xff)
		bsizes &= bsizes_more;
	if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
	    (bsizes & DMA_BURST32) == 0)
		bsizes = (DMA_BURST32 - 1);

	return bsizes;
}

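/* Each QE channel is a child of the QEC SBUS node.  get_qec() returns the
 * shared per-QEC state for a channel's parent device, creating it on first
 * use: mapping the global registers, resetting the chip, programming the
 * burst sizes and memory split, and hooking the shared interrupt.
 */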
static struct sunqec *get_qec(struct platform_device *child)
{
	struct platform_device *op = to_platform_device(child->dev.parent);
	struct sunqec *qecp;

	qecp = platform_get_drvdata(op);
	if (!qecp) {
		qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
		if (qecp) {
			u32 ctrl;

			qecp->op = op;
			qecp->gregs = of_ioremap(&op->resource[0], 0,
						 GLOB_REG_SIZE,
						 "QEC Global Registers");
			if (!qecp->gregs)
				goto fail;

			/* Make sure the QEC is in MACE mode. */
			ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
			ctrl &= 0xf0000000;
			if (ctrl != GLOB_CTRL_MMODE) {
				printk(KERN_ERR "qec: Not in MACE mode!\n");
				goto fail;
			}

			if (qec_global_reset(qecp->gregs))
				goto fail;

			qecp->qec_bursts = qec_get_burst(op->dev.of_node);

			qec_init_once(qecp, op);

			if (request_irq(op->archdata.irqs[0], qec_interrupt,
					IRQF_SHARED, "qec", (void *) qecp)) {
				printk(KERN_ERR "qec: Can't register irq.\n");
				goto fail;
			}

			platform_set_drvdata(op, qecp);

			qecp->next_module = root_qec_dev;
			root_qec_dev = qecp;
		}
	}

	return qecp;

fail:
	if (qecp->gregs)
		of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
	kfree(qecp);
	return NULL;
}

static const struct net_device_ops qec_ops = {
	.ndo_open		= qe_open,
	.ndo_stop		= qe_close,
	.ndo_start_xmit		= qe_start_xmit,
	.ndo_set_rx_mode	= qe_set_multicast,
	.ndo_tx_timeout		= qe_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};

static int qec_ether_init(struct platform_device *op)
{
	static unsigned version_printed;
	struct net_device *dev;
	struct sunqec *qecp;
	struct sunqe *qe;
	int i, res;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev = alloc_etherdev(sizeof(struct sunqe));
	if (!dev)
		return -ENOMEM;

	memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);

	qe = netdev_priv(dev);

	res = -ENODEV;

	i = of_getintprop_default(op->dev.of_node, "channel#", -1);
	if (i == -1)
		goto fail;
	qe->channel = i;
	spin_lock_init(&qe->lock);

	qecp = get_qec(op);
	if (!qecp)
		goto fail;

	qecp->qes[qe->channel] = qe;
	qe->dev = dev;
	qe->parent = qecp;
	qe->op = op;

	res = -ENOMEM;
	qe->qcregs = of_ioremap(&op->resource[0], 0,
				CREG_REG_SIZE, "QEC Channel Registers");
	if (!qe->qcregs) {
		printk(KERN_ERR "qe: Cannot map channel registers.\n");
		goto fail;
	}

	qe->mregs = of_ioremap(&op->resource[1], 0,
			       MREGS_REG_SIZE, "QE MACE Registers");
	if (!qe->mregs) {
		printk(KERN_ERR "qe: Cannot map MACE registers.\n");
		goto fail;
	}

	qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
					  &qe->qblock_dvma, GFP_ATOMIC);
	qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
					 &qe->buffers_dvma, GFP_ATOMIC);
	if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
	    qe->buffers == NULL || qe->buffers_dvma == 0)
		goto fail;

	/* Stop this QE. */
	qe_stop(qe);

	SET_NETDEV_DEV(dev, &op->dev);

	dev->watchdog_timeo = 5*HZ;
	dev->irq = op->archdata.irqs[0];
	dev->dma = 0;
	dev->ethtool_ops = &qe_ethtool_ops;
	dev->netdev_ops = &qec_ops;

	res = register_netdev(dev);
	if (res)
		goto fail;

	platform_set_drvdata(op, qe);

	printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
	       dev->dev_addr);
	return 0;

fail:
	if (qe->qcregs)
		of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
	if (qe->mregs)
		of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
	if (qe->qe_block)
		dma_free_coherent(&op->dev, PAGE_SIZE,
				  qe->qe_block, qe->qblock_dvma);
	if (qe->buffers)
		dma_free_coherent(&op->dev,
				  sizeof(struct sunqe_buffers),
				  qe->buffers,
				  qe->buffers_dvma);

	free_netdev(dev);

	return res;
}

static int qec_sbus_probe(struct platform_device *op)
{
	return qec_ether_init(op);
}

static int qec_sbus_remove(struct platform_device *op)
{
	struct sunqe *qp = platform_get_drvdata(op);
	struct net_device *net_dev = qp->dev;

	unregister_netdev(net_dev);

	of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
	of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
	dma_free_coherent(&op->dev, PAGE_SIZE,
			  qp->qe_block, qp->qblock_dvma);
	dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
			  qp->buffers, qp->buffers_dvma);

	free_netdev(net_dev);

	return 0;
}

static const struct of_device_id qec_sbus_match[] = {
	{
		.name = "qe",
	},
	{},
};

MODULE_DEVICE_TABLE(of, qec_sbus_match);

static struct platform_driver qec_sbus_driver = {
	.driver = {
		.name = "qec",
		.of_match_table = qec_sbus_match,
	},
	.probe		= qec_sbus_probe,
	.remove		= qec_sbus_remove,
};

static int __init qec_init(void)
{
	return platform_driver_register(&qec_sbus_driver);
}

static void __exit qec_exit(void)
{
	platform_driver_unregister(&qec_sbus_driver);

	while (root_qec_dev) {
		struct sunqec *next = root_qec_dev->next_module;
		struct platform_device *op = root_qec_dev->op;

		free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
		of_iounmap(&op->resource[0], root_qec_dev->gregs,
			   GLOB_REG_SIZE);
		kfree(root_qec_dev);

		root_qec_dev = next;
	}
}

module_init(qec_init);
module_exit(qec_exit);