xref: /OK3568_Linux_fs/kernel/drivers/net/fddi/defza.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0+
2*4882a593Smuzhiyun /*	FDDI network adapter driver for DEC FDDIcontroller 700/700-C devices.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  *	Copyright (c) 2018  Maciej W. Rozycki
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  *	This program is free software; you can redistribute it and/or
7*4882a593Smuzhiyun  *	modify it under the terms of the GNU General Public License
8*4882a593Smuzhiyun  *	as published by the Free Software Foundation; either version
9*4882a593Smuzhiyun  *	2 of the License, or (at your option) any later version.
10*4882a593Smuzhiyun  *
11*4882a593Smuzhiyun  *	References:
12*4882a593Smuzhiyun  *
13*4882a593Smuzhiyun  *	Dave Sawyer & Phil Weeks & Frank Itkowsky,
14*4882a593Smuzhiyun  *	"DEC FDDIcontroller 700 Port Specification",
15*4882a593Smuzhiyun  *	Revision 1.1, Digital Equipment Corporation
16*4882a593Smuzhiyun  */
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun /* ------------------------------------------------------------------------- */
19*4882a593Smuzhiyun /* FZA configurable parameters.                                              */
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun /* The number of transmit ring descriptors; either 0 for 512 or 1 for 1024.  */
22*4882a593Smuzhiyun #define FZA_RING_TX_MODE 0
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun /* The number of receive ring descriptors; from 2 up to 256.  */
25*4882a593Smuzhiyun #define FZA_RING_RX_SIZE 256
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun /* End of FZA configurable parameters.  No need to change anything below.    */
28*4882a593Smuzhiyun /* ------------------------------------------------------------------------- */
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun #include <linux/delay.h>
31*4882a593Smuzhiyun #include <linux/device.h>
32*4882a593Smuzhiyun #include <linux/dma-mapping.h>
33*4882a593Smuzhiyun #include <linux/init.h>
34*4882a593Smuzhiyun #include <linux/interrupt.h>
35*4882a593Smuzhiyun #include <linux/io.h>
36*4882a593Smuzhiyun #include <linux/io-64-nonatomic-lo-hi.h>
37*4882a593Smuzhiyun #include <linux/ioport.h>
38*4882a593Smuzhiyun #include <linux/kernel.h>
39*4882a593Smuzhiyun #include <linux/list.h>
40*4882a593Smuzhiyun #include <linux/module.h>
41*4882a593Smuzhiyun #include <linux/netdevice.h>
42*4882a593Smuzhiyun #include <linux/fddidevice.h>
43*4882a593Smuzhiyun #include <linux/sched.h>
44*4882a593Smuzhiyun #include <linux/skbuff.h>
45*4882a593Smuzhiyun #include <linux/spinlock.h>
46*4882a593Smuzhiyun #include <linux/stat.h>
47*4882a593Smuzhiyun #include <linux/tc.h>
48*4882a593Smuzhiyun #include <linux/timer.h>
49*4882a593Smuzhiyun #include <linux/types.h>
50*4882a593Smuzhiyun #include <linux/wait.h>
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun #include <asm/barrier.h>
53*4882a593Smuzhiyun 
54*4882a593Smuzhiyun #include "defza.h"
55*4882a593Smuzhiyun 
#define DRV_NAME "defza"
#define DRV_VERSION "v.1.1.4"
#define DRV_RELDATE "Oct  6 2018"

/* Banner printed (and exported) as the driver identification string. */
static const char version[] =
	DRV_NAME ": " DRV_VERSION "  " DRV_RELDATE "  Maciej W. Rozycki\n";

MODULE_AUTHOR("Maciej W. Rozycki <macro@linux-mips.org>");
MODULE_DESCRIPTION("DEC FDDIcontroller 700 (DEFZA-xx) driver");
MODULE_LICENSE("GPL");

/* Module parameter; its value is passed verbatim to the adapter as the
 * PARAM command's loop_mode field (see fza_cmd_send), selecting a
 * loopback mode on the board.
 */
static int loopback;
module_param(loopback, int, 0644);

/* Multicast addresses always loaded into the first two CAM entries by
 * the MODCAM command (see fza_cmd_send).  Both arrays are 8 bytes with
 * the last two bytes implicitly zero.
 */
/* Ring Purger Multicast */
static u8 hw_addr_purger[8] = { 0x09, 0x00, 0x2b, 0x02, 0x01, 0x05 };
/* Directed Beacon Multicast */
static u8 hw_addr_beacon[8] = { 0x01, 0x80, 0xc2, 0x00, 0x01, 0x00 };
74*4882a593Smuzhiyun 
/* Shorthands for MMIO accesses that we require to be strongly ordered
 * WRT preceding MMIO accesses.
 *
 * NOTE(review): both the "_o" (ordered) and "_u" (unordered) variants
 * currently map to the relaxed accessors.  Presumably the platforms this
 * TURBOchannel adapter exists on give the required ordering for relaxed
 * MMIO accesses already — confirm against the architecture's I/O
 * ordering rules before reusing this pattern elsewhere.
 */
#define readw_o readw_relaxed
#define readl_o readl_relaxed

#define writew_o writew_relaxed
#define writel_o writel_relaxed

/* Shorthands for MMIO accesses that we are happy with being weakly ordered
 * WRT preceding MMIO accesses.
 */
#define readw_u readw_relaxed
#define readl_u readl_relaxed
#define readq_u readq_relaxed

#define writew_u writew_relaxed
#define writel_u writel_relaxed
#define writeq_u writeq_relaxed
94*4882a593Smuzhiyun 
fza_alloc_skb_irq(struct net_device * dev,unsigned int length)95*4882a593Smuzhiyun static inline struct sk_buff *fza_alloc_skb_irq(struct net_device *dev,
96*4882a593Smuzhiyun 						unsigned int length)
97*4882a593Smuzhiyun {
98*4882a593Smuzhiyun 	return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
99*4882a593Smuzhiyun }
100*4882a593Smuzhiyun 
fza_alloc_skb(struct net_device * dev,unsigned int length)101*4882a593Smuzhiyun static inline struct sk_buff *fza_alloc_skb(struct net_device *dev,
102*4882a593Smuzhiyun 					    unsigned int length)
103*4882a593Smuzhiyun {
104*4882a593Smuzhiyun 	return __netdev_alloc_skb(dev, length, GFP_KERNEL);
105*4882a593Smuzhiyun }
106*4882a593Smuzhiyun 
fza_skb_align(struct sk_buff * skb,unsigned int v)107*4882a593Smuzhiyun static inline void fza_skb_align(struct sk_buff *skb, unsigned int v)
108*4882a593Smuzhiyun {
109*4882a593Smuzhiyun 	unsigned long x, y;
110*4882a593Smuzhiyun 
111*4882a593Smuzhiyun 	x = (unsigned long)skb->data;
112*4882a593Smuzhiyun 	y = ALIGN(x, v);
113*4882a593Smuzhiyun 
114*4882a593Smuzhiyun 	skb_reserve(skb, y - x);
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun 
/* Copy a block of data from adapter MMIO ("from") into host memory
 * ("to").  Only 32-bit — and, on 64-bit hosts, 64-bit — loads are
 * issued; "size" is in bytes and is rounded up to a whole number of
 * 32-bit words.  The sizeof(unsigned long) test is a compile-time
 * constant, so only one of the two branches survives.
 */
static inline void fza_reads(const void __iomem *from, void *to,
			     unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 __iomem *src = from;
		const u32 __iomem *src_trail;
		u64 *dst = to;
		u32 *dst_trail;

		/* Move quadwords; "size" is now a count of 32-bit words,
		 * hence the decrement by 2 per 64-bit transfer.
		 */
		for (size = (size + 3) / 4; size > 1; size -= 2)
			*dst++ = readq_u(src++);
		/* An odd word count leaves one trailing 32-bit load. */
		if (size) {
			src_trail = (u32 __iomem *)src;
			dst_trail = (u32 *)dst;
			*dst_trail = readl_u(src_trail);
		}
	} else {
		const u32 __iomem *src = from;
		u32 *dst = to;

		for (size = (size + 3) / 4; size; size--)
			*dst++ = readl_u(src++);
	}
}
141*4882a593Smuzhiyun 
fza_writes(const void * from,void __iomem * to,unsigned long size)142*4882a593Smuzhiyun static inline void fza_writes(const void *from, void __iomem *to,
143*4882a593Smuzhiyun 			      unsigned long size)
144*4882a593Smuzhiyun {
145*4882a593Smuzhiyun 	if (sizeof(unsigned long) == 8) {
146*4882a593Smuzhiyun 		const u64 *src = from;
147*4882a593Smuzhiyun 		const u32 *src_trail;
148*4882a593Smuzhiyun 		u64 __iomem *dst = to;
149*4882a593Smuzhiyun 		u32 __iomem *dst_trail;
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun 		for (size = (size + 3) / 4; size > 1; size -= 2)
152*4882a593Smuzhiyun 			writeq_u(*src++, dst++);
153*4882a593Smuzhiyun 		if (size) {
154*4882a593Smuzhiyun 			src_trail = (u32 *)src;
155*4882a593Smuzhiyun 			dst_trail = (u32 __iomem *)dst;
156*4882a593Smuzhiyun 			writel_u(*src_trail, dst_trail);
157*4882a593Smuzhiyun 		}
158*4882a593Smuzhiyun 	} else {
159*4882a593Smuzhiyun 		const u32 *src = from;
160*4882a593Smuzhiyun 		u32 __iomem *dst = to;
161*4882a593Smuzhiyun 
162*4882a593Smuzhiyun 		for (size = (size + 3) / 4; size; size--)
163*4882a593Smuzhiyun 			writel_u(*src++, dst++);
164*4882a593Smuzhiyun 	}
165*4882a593Smuzhiyun }
166*4882a593Smuzhiyun 
/* Copy a block of data between two adapter MMIO locations (both "from"
 * and "to" are on the board).  Same word-only access discipline as
 * fza_reads()/fza_writes(): "size" (bytes) is rounded up to 32-bit
 * words; 64-bit hosts use quadword transfers with an optional trailing
 * longword.
 */
static inline void fza_moves(const void __iomem *from, void __iomem *to,
			     unsigned long size)
{
	if (sizeof(unsigned long) == 8) {
		const u64 __iomem *src = from;
		const u32 __iomem *src_trail;
		u64 __iomem *dst = to;
		u32 __iomem *dst_trail;

		for (size = (size + 3) / 4; size > 1; size -= 2)
			writeq_u(readq_u(src++), dst++);
		if (size) {
			src_trail = (u32 __iomem *)src;
			dst_trail = (u32 __iomem *)dst;
			writel_u(readl_u(src_trail), dst_trail);
		}
	} else {
		const u32 __iomem *src = from;
		u32 __iomem *dst = to;

		for (size = (size + 3) / 4; size; size--)
			writel_u(readl_u(src++), dst++);
	}
}
191*4882a593Smuzhiyun 
/* Clear a block of adapter MMIO.  Only 32-bit (and, on 64-bit hosts,
 * 64-bit) stores are issued; "size" is in bytes and is rounded up to a
 * whole number of 32-bit words.
 */
static inline void fza_zeros(void __iomem *to, unsigned long size)
{
	unsigned long words = (size + 3) / 4;

	if (sizeof(unsigned long) == 8) {
		u64 __iomem *p = to;

		/* Quadword stores, then a trailing longword store if the
		 * 32-bit word count was odd.
		 */
		while (words > 1) {
			writeq_u(0, p++);
			words -= 2;
		}
		if (words)
			writel_u(0, (u32 __iomem *)p);
	} else {
		u32 __iomem *p = to;

		while (words--)
			writel_u(0, p++);
	}
}
211*4882a593Smuzhiyun 
/* Dump the board's six MMIO registers at pr_debug level.  The first
 * read uses the ordered accessor so the dump is not reordered ahead of
 * preceding MMIO; the rest may be weakly ordered.
 */
static inline void fza_regs_dump(struct fza_private *fp)
{
	pr_debug("%s: iomem registers:\n", fp->name);
	pr_debug(" reset:           0x%04x\n", readw_o(&fp->regs->reset));
	pr_debug(" interrupt event: 0x%04x\n", readw_u(&fp->regs->int_event));
	pr_debug(" status:          0x%04x\n", readw_u(&fp->regs->status));
	pr_debug(" interrupt mask:  0x%04x\n", readw_u(&fp->regs->int_mask));
	pr_debug(" control A:       0x%04x\n", readw_u(&fp->regs->control_a));
	pr_debug(" control B:       0x%04x\n", readw_u(&fp->regs->control_b));
}
222*4882a593Smuzhiyun 
/* Issue a hardware reset pulse to the board and re-enable the
 * interrupt events recorded in fp->int_mask.  The interleaved reads
 * flush posted writes (and provide a small delay between asserting and
 * clearing RESET).
 */
static inline void fza_do_reset(struct fza_private *fp)
{
	/* Reset the board. */
	writew_o(FZA_RESET_INIT, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
	readw_o(&fp->regs->reset);	/* Read it back for a small delay. */
	writew_o(FZA_RESET_CLR, &fp->regs->reset);

	/* Enable all interrupt events we handle. */
	writew_o(fp->int_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);	/* Synchronize. */
}
235*4882a593Smuzhiyun 
/* Quiesce the board: take it out of the driver operating mode and
 * leave it held through a reset pulse.  Unlike fza_do_reset() no
 * interrupts are re-enabled afterwards.
 */
static inline void fza_do_shutdown(struct fza_private *fp)
{
	/* Disable the driver mode. */
	writew_o(FZA_CONTROL_B_IDLE, &fp->regs->control_b);

	/* And reset the board. */
	writew_o(FZA_RESET_INIT, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
	writew_o(FZA_RESET_CLR, &fp->regs->reset);
	readw_o(&fp->regs->reset);	/* Synchronize. */
}
247*4882a593Smuzhiyun 
/* Reset the board and wait for it to reach the UNINITIALIZED state.
 *
 * fp->state_chg_flag is cleared under fp->lock before the reset and is
 * expected to be set asynchronously (presumably by the state-change
 * interrupt handler — not visible in this chunk; confirm) which wakes
 * fp->state_chg_wait.
 *
 * Returns 0 on success or -EIO if the reset times out or the board
 * lands in an unexpected state.
 */
static int fza_reset(struct fza_private *fp)
{
	unsigned long flags;
	uint status, state;
	long t;

	pr_info("%s: resetting the board...\n", fp->name);

	spin_lock_irqsave(&fp->lock, flags);
	fp->state_chg_flag = 0;
	fza_do_reset(fp);
	spin_unlock_irqrestore(&fp->lock, flags);

	/* DEC says RESET needs up to 30 seconds to complete.  My DEFZA-AA
	 * rev. C03 happily finishes in 9.7 seconds. :-)  But we need to
	 * be on the safe side...
	 */
	t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
			       45 * HZ);
	status = readw_u(&fp->regs->status);
	state = FZA_STATUS_GET_STATE(status);
	if (fp->state_chg_flag == 0) {
		pr_err("%s: RESET timed out!, state %x\n", fp->name, state);
		return -EIO;
	}
	if (state != FZA_STATE_UNINITIALIZED) {
		pr_err("%s: RESET failed!, state %x, failure ID %x\n",
		       fp->name, state, FZA_STATUS_GET_TEST(status));
		return -EIO;
	}
	pr_info("%s: OK\n", fp->name);
	/* "t" is the remaining timeout, so 45*HZ - t is the elapsed time. */
	pr_debug("%s: RESET: %lums elapsed\n", fp->name,
		 (45 * HZ - t) * 1000 / HZ);

	return 0;
}
284*4882a593Smuzhiyun 
/* Post a command to the adapter's command ring.
 *
 * Fills the command buffer associated with the current ring entry
 * according to "command", hands the entry over to the adapter and pokes
 * the command poll bit.  Returns the ring entry used (so the caller can
 * later inspect its status), or NULL if the ring entry is still owned
 * by the adapter (ring full).
 *
 * The state-change interrupt is masked for the duration of the call and
 * the previous mask restored at the end.  NOTE(review): callers such as
 * fza_init_send() hold fp->lock around this function; whether every
 * caller does is not visible in this chunk — confirm.
 */
static struct fza_ring_cmd __iomem *fza_cmd_send(struct net_device *dev,
						 int command)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring = fp->ring_cmd + fp->ring_cmd_index;
	unsigned int old_mask, new_mask;
	union fza_cmd_buf __iomem *buf;
	struct netdev_hw_addr *ha;
	int i;

	/* Temporarily mask the state-change event. */
	old_mask = fp->int_mask;
	new_mask = old_mask & ~FZA_MASK_STATE_CHG;
	writew_u(new_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);			/* Synchronize. */
	fp->int_mask = new_mask;

	/* The command buffer lives on the board; ring->buffer is its
	 * offset from the start of the MMIO window.
	 */
	buf = fp->mmio + readl_u(&ring->buffer);

	if ((readl_u(&ring->cmd_own) & FZA_RING_OWN_MASK) !=
	    FZA_RING_OWN_HOST) {
		pr_warn("%s: command buffer full, command: %u!\n", fp->name,
			command);
		return NULL;
	}

	/* Fill in command-specific parameters. */
	switch (command) {
	case FZA_RING_CMD_INIT:
		writel_u(FZA_RING_TX_MODE, &buf->init.tx_mode);
		writel_u(FZA_RING_RX_SIZE, &buf->init.hst_rx_size);
		fza_zeros(&buf->init.counters, sizeof(buf->init.counters));
		break;

	case FZA_RING_CMD_MODCAM:
		/* CAM entries 0 and 1 are always the Ring Purger and
		 * Directed Beacon multicast addresses; the remaining
		 * entries come from the device's multicast list, with
		 * any leftover slots cleared.
		 */
		i = 0;
		fza_writes(&hw_addr_purger, &buf->cam.hw_addr[i++],
			   sizeof(*buf->cam.hw_addr));
		fza_writes(&hw_addr_beacon, &buf->cam.hw_addr[i++],
			   sizeof(*buf->cam.hw_addr));
		netdev_for_each_mc_addr(ha, dev) {
			if (i >= FZA_CMD_CAM_SIZE)
				break;
			fza_writes(ha->addr, &buf->cam.hw_addr[i++],
				   sizeof(*buf->cam.hw_addr));
		}
		while (i < FZA_CMD_CAM_SIZE)
			fza_zeros(&buf->cam.hw_addr[i++],
				  sizeof(*buf->cam.hw_addr));
		break;

	case FZA_RING_CMD_PARAM:
		writel_u(loopback, &buf->param.loop_mode);
		writel_u(fp->t_max, &buf->param.t_max);
		writel_u(fp->t_req, &buf->param.t_req);
		writel_u(fp->tvx, &buf->param.tvx);
		writel_u(fp->lem_threshold, &buf->param.lem_threshold);
		fza_writes(&fp->station_id, &buf->param.station_id,
			   sizeof(buf->param.station_id));
		/* Convert to milliseconds due to buggy firmware. */
		writel_u(fp->rtoken_timeout / 12500,
			 &buf->param.rtoken_timeout);
		writel_u(fp->ring_purger, &buf->param.ring_purger);
		break;

	case FZA_RING_CMD_MODPROM:
		/* Promiscuous mode enables both LLC and SMT capture;
		 * multicast capture is forced on when the list would
		 * overflow the CAM (two slots are reserved above).
		 */
		if (dev->flags & IFF_PROMISC) {
			writel_u(1, &buf->modprom.llc_prom);
			writel_u(1, &buf->modprom.smt_prom);
		} else {
			writel_u(0, &buf->modprom.llc_prom);
			writel_u(0, &buf->modprom.smt_prom);
		}
		if (dev->flags & IFF_ALLMULTI ||
		    netdev_mc_count(dev) > FZA_CMD_CAM_SIZE - 2)
			writel_u(1, &buf->modprom.llc_multi);
		else
			writel_u(0, &buf->modprom.llc_multi);
		writel_u(1, &buf->modprom.llc_bcast);
		break;
	}

	/* Trigger the command. */
	writel_u(FZA_RING_OWN_FZA | command, &ring->cmd_own);
	writew_o(FZA_CONTROL_A_CMD_POLL, &fp->regs->control_a);

	fp->ring_cmd_index = (fp->ring_cmd_index + 1) % FZA_RING_CMD_SIZE;

	/* Restore the interrupt mask saved on entry. */
	fp->int_mask = old_mask;
	writew_u(fp->int_mask, &fp->regs->int_mask);

	return ring;
}
376*4882a593Smuzhiyun 
/* Post the INIT command and wait for its completion.
 *
 * fp->cmd_done_flag is cleared under fp->lock before the command is
 * posted and is expected to be set asynchronously (presumably by the
 * command-done interrupt handler — not visible in this chunk; confirm),
 * waking fp->cmd_done_wait.
 *
 * On success, if "init" is non-NULL it is set to point at the INIT
 * response buffer on the board.  Returns 0 on success, -ENOBUFS if the
 * command ring is unexpectedly full, or -EIO on timeout/failure status.
 */
static int fza_init_send(struct net_device *dev,
			 struct fza_cmd_init *__iomem *init)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring;
	unsigned long flags;
	u32 stat;
	long t;

	spin_lock_irqsave(&fp->lock, flags);
	fp->cmd_done_flag = 0;
	ring = fza_cmd_send(dev, FZA_RING_CMD_INIT);
	spin_unlock_irqrestore(&fp->lock, flags);
	if (!ring)
		/* This should never happen in the uninitialized state,
		 * so do not try to recover and just consider it fatal.
		 */
		return -ENOBUFS;

	/* INIT may take quite a long time (160ms for my C03). */
	t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
	if (fp->cmd_done_flag == 0) {
		pr_err("%s: INIT command timed out!, state %x\n", fp->name,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	stat = readl_u(&ring->stat);
	if (stat != FZA_RING_STAT_SUCCESS) {
		pr_err("%s: INIT command failed!, status %02x, state %x\n",
		       fp->name, stat,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	/* "t" is the remaining timeout, so 3*HZ - t is the elapsed time. */
	pr_debug("%s: INIT: %lums elapsed\n", fp->name,
		 (3 * HZ - t) * 1000 / HZ);

	if (init)
		*init = fp->mmio + readl_u(&ring->buffer);
	return 0;
}
417*4882a593Smuzhiyun 
fza_rx_init(struct fza_private * fp)418*4882a593Smuzhiyun static void fza_rx_init(struct fza_private *fp)
419*4882a593Smuzhiyun {
420*4882a593Smuzhiyun 	int i;
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun 	/* Fill the host receive descriptor ring. */
423*4882a593Smuzhiyun 	for (i = 0; i < FZA_RING_RX_SIZE; i++) {
424*4882a593Smuzhiyun 		writel_o(0, &fp->ring_hst_rx[i].rmc);
425*4882a593Smuzhiyun 		writel_o((fp->rx_dma[i] + 0x1000) >> 9,
426*4882a593Smuzhiyun 			 &fp->ring_hst_rx[i].buffer1);
427*4882a593Smuzhiyun 		writel_o(fp->rx_dma[i] >> 9 | FZA_RING_OWN_FZA,
428*4882a593Smuzhiyun 			 &fp->ring_hst_rx[i].buf0_own);
429*4882a593Smuzhiyun 	}
430*4882a593Smuzhiyun }
431*4882a593Smuzhiyun 
/* ndo_set_rx_mode-style handler: reload the CAM from the current
 * multicast list, then update the promiscuous/multicast capture flags.
 * Command completion is not waited for here.
 */
static void fza_set_rx_mode(struct net_device *dev)
{
	fza_cmd_send(dev, FZA_RING_CMD_MODCAM);
	fza_cmd_send(dev, FZA_RING_CMD_MODPROM);
}
437*4882a593Smuzhiyun 
/* Two views of a transmit-source pointer: plain host memory
 * (data_ptr) or adapter MMIO (mmio_ptr).  Which member is valid is
 * selected by the "smt" argument of fza_do_xmit().
 */
union fza_buffer_txp {
	struct fza_buffer_tx *data_ptr;
	struct fza_buffer_tx __iomem *mmio_ptr;
};
442*4882a593Smuzhiyun 
/* Copy a frame of "len" bytes into the RMC transmit ring, fragmenting
 * it across FZA_TX_BUFFER_SIZE-sized on-board buffers, and kick the
 * transmitter.
 *
 * "smt" selects the source: non-zero means the frame lives in adapter
 * MMIO (ub.mmio_ptr, copied with fza_moves), zero means host memory
 * (ub.data_ptr, copied with fza_writes).
 *
 * Returns 1 if there is not enough ring space for the frame (nothing
 * is consumed), 0 on success.  NOTE(review): callers presumably hold
 * fp->lock — not visible in this chunk; confirm.
 */
static int fza_do_xmit(union fza_buffer_txp ub, int len,
		       struct net_device *dev, int smt)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *rmc_tx_ptr;
	int i, first, frag_len, left_len;
	u32 own, rmc;

	/* Space check: number of free descriptors between the cleanup
	 * index (txd) and the producer index, times the buffer size.
	 */
	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) < len)
		return 1;

	first = fp->ring_rmc_tx_index;

	left_len = len;
	frag_len = FZA_TX_BUFFER_SIZE;
	/* First descriptor is relinquished last. */
	own = FZA_RING_TX_OWN_HOST;
	/* First descriptor carries frame length; we don't use cut-through. */
	rmc = FZA_RING_TX_SOP | FZA_RING_TX_VBC | len;
	do {
		i = fp->ring_rmc_tx_index;
		rmc_tx_ptr = &fp->buffer_tx[i];

		if (left_len < FZA_TX_BUFFER_SIZE)
			frag_len = left_len;
		left_len -= frag_len;

		/* Length must be a multiple of 4 as only word writes are
		 * permitted!
		 */
		frag_len = (frag_len + 3) & ~3;
		if (smt)
			fza_moves(ub.mmio_ptr, rmc_tx_ptr, frag_len);
		else
			fza_writes(ub.data_ptr, rmc_tx_ptr, frag_len);

		if (left_len == 0)
			rmc |= FZA_RING_TX_EOP;		/* Mark last frag. */

		writel_o(rmc, &fp->ring_rmc_tx[i].rmc);
		writel_o(own, &fp->ring_rmc_tx[i].own);

		/* Advance the source by one buffer-sized chunk; both
		 * union members advance identically.
		 */
		ub.data_ptr++;
		fp->ring_rmc_tx_index = (fp->ring_rmc_tx_index + 1) %
					fp->ring_rmc_tx_size;

		/* Settings for intermediate frags. */
		own = FZA_RING_TX_OWN_RMC;
		rmc = 0;
	} while (left_len > 0);

	/* Stop the queue if the remaining space cannot hold another
	 * maximum-sized frame.
	 */
	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) < dev->mtu + dev->hard_header_len) {
		netif_stop_queue(dev);
		pr_debug("%s: queue stopped\n", fp->name);
	}

	/* Hand the first descriptor over last so the RMC only sees a
	 * fully built frame.
	 */
	writel_o(FZA_RING_TX_OWN_RMC, &fp->ring_rmc_tx[first].own);

	/* Go, go, go! */
	writew_o(FZA_CONTROL_A_TX_POLL, &fp->regs->control_a);

	return 0;
}
510*4882a593Smuzhiyun 
/* Hand an SMT frame from host memory to the adapter through the SMT
 * receive ring: copy "len" bytes into the on-board buffer of the
 * current ring entry, store the caller-supplied RMC descriptor word,
 * give the entry to the adapter and poke the SMT RX poll bit.
 *
 * Returns 1 if the current entry is still owned by the adapter (ring
 * full; nothing is consumed), 0 on success.
 */
static int fza_do_recv_smt(struct fza_buffer_tx *data_ptr, int len,
			   u32 rmc, struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *smt_rx_ptr;
	u32 own;
	int i;

	i = fp->ring_smt_rx_index;
	own = readl_o(&fp->ring_smt_rx[i].own);
	if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
		return 1;

	/* The buffer lives on the board; ring->buffer is its offset from
	 * the start of the MMIO window.
	 */
	smt_rx_ptr = fp->mmio + readl_u(&fp->ring_smt_rx[i].buffer);

	/* Length must be a multiple of 4 as only word writes are permitted! */
	fza_writes(data_ptr, smt_rx_ptr, (len + 3) & ~3);

	writel_o(rmc, &fp->ring_smt_rx[i].rmc);
	writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_rx[i].own);

	fp->ring_smt_rx_index =
		(fp->ring_smt_rx_index + 1) % fp->ring_smt_rx_size;

	/* Grab it! */
	writew_o(FZA_CONTROL_A_SMT_RX_POLL, &fp->regs->control_a);

	return 0;
}
540*4882a593Smuzhiyun 
/* Reclaim completed transmit descriptors.
 *
 * Walks the RMC TX ring from the cleanup index (ring_rmc_txd_index)
 * towards the producer index, stopping at the first descriptor still
 * owned by the RMC.  Per-frame statistics are accounted on the SOP
 * descriptor only.  Wakes the netif queue once enough space for a
 * maximum-sized frame is available again.
 */
static void fza_tx(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own, rmc;
	int i;

	while (1) {
		i = fp->ring_rmc_txd_index;
		if (i == fp->ring_rmc_tx_index)
			break;
		own = readl_o(&fp->ring_rmc_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC)
			break;

		rmc = readl_u(&fp->ring_rmc_tx[i].rmc);
		/* Only process the first descriptor. */
		if ((rmc & FZA_RING_TX_SOP) != 0) {
			if ((rmc & FZA_RING_TX_DCC_MASK) ==
			    FZA_RING_TX_DCC_SUCCESS) {
				int pkt_len = (rmc & FZA_RING_PBC_MASK) - 3;
								/* Omit PRH. */

				fp->stats.tx_packets++;
				fp->stats.tx_bytes += pkt_len;
			} else {
				/* Classify the completion error. */
				fp->stats.tx_errors++;
				switch (rmc & FZA_RING_TX_DCC_MASK) {
				case FZA_RING_TX_DCC_DTP_SOP:
				case FZA_RING_TX_DCC_DTP:
				case FZA_RING_TX_DCC_ABORT:
					fp->stats.tx_aborted_errors++;
					break;
				case FZA_RING_TX_DCC_UNDRRUN:
					fp->stats.tx_fifo_errors++;
					break;
				case FZA_RING_TX_DCC_PARITY:
				default:
					break;
				}
			}
		}

		fp->ring_rmc_txd_index = (fp->ring_rmc_txd_index + 1) %
					 fp->ring_rmc_tx_size;
	}

	/* Same space computation as fza_do_xmit(): wake the queue when a
	 * full-sized frame fits again, provided the queue is active.
	 */
	if (((((fp->ring_rmc_txd_index - 1 + fp->ring_rmc_tx_size) -
	       fp->ring_rmc_tx_index) % fp->ring_rmc_tx_size) *
	     FZA_TX_BUFFER_SIZE) >= dev->mtu + dev->hard_header_len) {
		if (fp->queue_active) {
			netif_wake_queue(dev);
			pr_debug("%s: queue woken\n", fp->name);
		}
	}
}
596*4882a593Smuzhiyun 
/* Validate a received frame from its RMC descriptor status word (rmc)
 * and its Frame Control octet (fc).  Error counters in fp->stats are
 * updated as appropriate.  Returns 1 if the frame must be discarded
 * (and possibly after halting the interface), 0 if the frame is good.
 */
static inline int fza_rx_err(struct fza_private *fp,
			     const u32 rmc, const u8 fc)
{
	int len, min_len, max_len;

	/* Frame length in octets as counted by the RMC (PBC field). */
	len = rmc & FZA_RING_PBC_MASK;

	if (unlikely((rmc & FZA_RING_RX_BAD) != 0)) {
		fp->stats.rx_errors++;

		/* Check special status codes. */
		if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
			    FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
		     (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
		      FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_ALIAS)) {
			/* NOTE(review): 8190 is presumably the RMC's
			 * byte-count saturation point indicating an
			 * overlong frame -- confirm against the FZA
			 * port specification.
			 */
			if (len >= 8190)
				fp->stats.rx_length_errors++;
			return 1;
		}
		if ((rmc & (FZA_RING_RX_CRC | FZA_RING_RX_RRR_MASK |
			    FZA_RING_RX_DA_MASK | FZA_RING_RX_SA_MASK)) ==
		     (FZA_RING_RX_CRC | FZA_RING_RX_RRR_DADDR |
		      FZA_RING_RX_DA_CAM | FZA_RING_RX_SA_CAM)) {
			/* Halt the interface to trigger a reset. */
			writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
			readw_o(&fp->regs->control_a);	/* Synchronize. */
			return 1;
		}

		/* Check the MAC status. */
		switch (rmc & FZA_RING_RX_RRR_MASK) {
		case FZA_RING_RX_RRR_OK:
			/* MAC reported OK but the BAD bit is set: classify
			 * by the remaining per-frame error indications.
			 */
			if ((rmc & FZA_RING_RX_CRC) != 0)
				fp->stats.rx_crc_errors++;
			else if ((rmc & FZA_RING_RX_FSC_MASK) == 0 ||
				 (rmc & FZA_RING_RX_FSB_ERR) != 0)
				fp->stats.rx_frame_errors++;
			return 1;
		case FZA_RING_RX_RRR_SADDR:
		case FZA_RING_RX_RRR_DADDR:
		case FZA_RING_RX_RRR_ABORT:
			/* Halt the interface to trigger a reset. */
			writew_o(FZA_CONTROL_A_HALT, &fp->regs->control_a);
			readw_o(&fp->regs->control_a);	/* Synchronize. */
			return 1;
		case FZA_RING_RX_RRR_LENGTH:
			fp->stats.rx_frame_errors++;
			return 1;
		default:
			return 1;
		}
	}

	/* Packet received successfully; validate the length. */
	switch (fc & FDDI_FC_K_FORMAT_MASK) {
	case FDDI_FC_K_FORMAT_MANAGEMENT:
		/* NOTE(review): the 37/20/17-octet minima are presumably
		 * the smallest legal frames for the respective FC classes
		 * -- verify against the FDDI MAC standard.
		 */
		if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_ASYNC)
			min_len = 37;
		else
			min_len = 17;
		break;
	case FDDI_FC_K_FORMAT_LLC:
		min_len = 20;
		break;
	default:
		min_len = 17;
		break;
	}
	/* FDDI maximum frame size as counted here (sans P/SD/preamble). */
	max_len = 4495;
	if (len < min_len || len > max_len) {
		fp->stats.rx_errors++;
		fp->stats.rx_length_errors++;
		return 1;
	}

	return 0;
}
674*4882a593Smuzhiyun 
/* Receive handler: drain the host RX descriptor ring.  For each frame
 * owned by the host, validate it, pass SMT frames to the SMT receive
 * path, deliver the rest to the network stack, and re-arm the
 * descriptor with a fresh (or, on failure, the recycled) buffer.
 * Called from interrupt context.
 */
static void fza_rx(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct sk_buff *skb, *newskb;
	struct fza_fddihdr *frame;
	dma_addr_t dma, newdma;
	u32 own, rmc, buf;
	int i, len;
	u8 fc;

	while (1) {
		i = fp->ring_hst_rx_index;
		own = readl_o(&fp->ring_hst_rx[i].buf0_own);
		/* Stop at the first descriptor still owned by the FZA. */
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;

		rmc = readl_u(&fp->ring_hst_rx[i].rmc);
		skb = fp->rx_skbuff[i];
		dma = fp->rx_dma[i];

		/* The RMC doesn't count the preamble and the starting
		 * delimiter.  We fix it up here for a total of 3 octets.
		 */
		dma_rmb();
		len = (rmc & FZA_RING_PBC_MASK) + 3;
		frame = (struct fza_fddihdr *)skb->data;

		/* We need to get at real FC. */
		dma_sync_single_for_cpu(fp->bdev,
					dma +
					((u8 *)&frame->hdr.fc - (u8 *)frame),
					sizeof(frame->hdr.fc),
					DMA_FROM_DEVICE);
		fc = frame->hdr.fc;

		if (fza_rx_err(fp, rmc, fc))
			goto err_rx;

		/* We have to 512-byte-align RX buffers... */
		newskb = fza_alloc_skb_irq(dev, FZA_RX_BUFFER_SIZE + 511);
		if (newskb) {
			fza_skb_align(newskb, 512);
			newdma = dma_map_single(fp->bdev, newskb->data,
						FZA_RX_BUFFER_SIZE,
						DMA_FROM_DEVICE);
			if (dma_mapping_error(fp->bdev, newdma)) {
				dev_kfree_skb_irq(newskb);
				newskb = NULL;
			}
		}
		if (newskb) {
			/* A replacement buffer is mapped; the received skb
			 * can now be handed up the stack and the descriptor
			 * re-armed with the new one.
			 */
			int pkt_len = len - 7;	/* Omit P, SD and FCS. */
			int is_multi;
			int rx_stat;

			dma_unmap_single(fp->bdev, dma, FZA_RX_BUFFER_SIZE,
					 DMA_FROM_DEVICE);

			/* Queue SMT frames to the SMT receive ring. */
			if ((fc & (FDDI_FC_K_CLASS_MASK |
				   FDDI_FC_K_FORMAT_MASK)) ==
			     (FDDI_FC_K_CLASS_ASYNC |
			      FDDI_FC_K_FORMAT_MANAGEMENT) &&
			    (rmc & FZA_RING_RX_DA_MASK) !=
			     FZA_RING_RX_DA_PROM) {
				if (fza_do_recv_smt((struct fza_buffer_tx *)
						    skb->data, len, rmc,
						    dev)) {
					/* SMT RX ring full; tell the FZA. */
					writel_o(FZA_CONTROL_A_SMT_RX_OVFL,
						 &fp->regs->control_a);
				}
			}

			/* Group bit of the destination address. */
			is_multi = ((frame->hdr.daddr[0] & 0x01) != 0);

			skb_reserve(skb, 3);	/* Skip over P and SD. */
			skb_put(skb, pkt_len);	/* And cut off FCS. */
			skb->protocol = fddi_type_trans(skb, dev);

			rx_stat = netif_rx(skb);
			if (rx_stat != NET_RX_DROP) {
				fp->stats.rx_packets++;
				fp->stats.rx_bytes += pkt_len;
				if (is_multi)
					fp->stats.multicast++;
			} else {
				fp->stats.rx_dropped++;
			}

			skb = newskb;
			dma = newdma;
			fp->rx_skbuff[i] = skb;
			fp->rx_dma[i] = dma;
		} else {
			/* No replacement buffer: drop the frame and recycle
			 * the old buffer below so the ring never runs dry.
			 */
			fp->stats.rx_dropped++;
			pr_notice("%s: memory squeeze, dropping packet\n",
				  fp->name);
		}

err_rx:
		/* Re-arm the descriptor and give it back to the FZA.
		 * Buffer addresses are written in 512-byte units; buffer1
		 * points 4KB into the same buffer.
		 */
		writel_o(0, &fp->ring_hst_rx[i].rmc);
		buf = (dma + 0x1000) >> 9;
		writel_o(buf, &fp->ring_hst_rx[i].buffer1);
		buf = dma >> 9 | FZA_RING_OWN_FZA;
		writel_o(buf, &fp->ring_hst_rx[i].buf0_own);
		fp->ring_hst_rx_index =
			(fp->ring_hst_rx_index + 1) % fp->ring_hst_rx_size;
	}
}
784*4882a593Smuzhiyun 
/* SMT transmit handler: drain the SMT TX ring of firmware-generated SMT
 * frames.  Each frame is optionally copied to an skb for packet taps
 * (when any are active) and then queued onto the RMC transmit ring for
 * actual transmission.  Called from interrupt context.
 */
static void fza_tx_smt(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_buffer_tx __iomem *smt_tx_ptr;
	int i, len;
	u32 own;

	while (1) {
		i = fp->ring_smt_tx_index;
		own = readl_o(&fp->ring_smt_tx[i].own);
		/* Stop at the first descriptor still owned by the FZA. */
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
			break;

		/* The frame lives in adapter memory, reachable via MMIO. */
		smt_tx_ptr = fp->mmio + readl_u(&fp->ring_smt_tx[i].buffer);
		len = readl_u(&fp->ring_smt_tx[i].rmc) & FZA_RING_PBC_MASK;

		if (!netif_queue_stopped(dev)) {
			if (dev_nit_active(dev)) {
				struct fza_buffer_tx *skb_data_ptr;
				struct sk_buff *skb;

				/* Length must be a multiple of 4 as only word
				 * reads are permitted!
				 */
				skb = fza_alloc_skb_irq(dev, (len + 3) & ~3);
				if (!skb)
					goto err_no_skb;	/* Drop. */

				skb_data_ptr = (struct fza_buffer_tx *)
					       skb->data;

				fza_reads(smt_tx_ptr, skb_data_ptr,
					  (len + 3) & ~3);
				skb->dev = dev;
				skb_reserve(skb, 3);	/* Skip over PRH. */
				skb_put(skb, len - 3);
				skb_reset_network_header(skb);

				/* Show the frame to the packet taps. */
				dev_queue_xmit_nit(skb, dev);

				dev_kfree_skb_irq(skb);

err_no_skb:
				;
			}

			/* Queue the frame to the RMC transmit ring. */
			fza_do_xmit((union fza_buffer_txp)
				    { .mmio_ptr = smt_tx_ptr },
				    len, dev, 1);
		}

		/* Return the descriptor to the FZA and advance. */
		writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
		fp->ring_smt_tx_index =
			(fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;
	}
}
842*4882a593Smuzhiyun 
fza_uns(struct net_device * dev)843*4882a593Smuzhiyun static void fza_uns(struct net_device *dev)
844*4882a593Smuzhiyun {
845*4882a593Smuzhiyun 	struct fza_private *fp = netdev_priv(dev);
846*4882a593Smuzhiyun 	u32 own;
847*4882a593Smuzhiyun 	int i;
848*4882a593Smuzhiyun 
849*4882a593Smuzhiyun 	while (1) {
850*4882a593Smuzhiyun 		i = fp->ring_uns_index;
851*4882a593Smuzhiyun 		own = readl_o(&fp->ring_uns[i].own);
852*4882a593Smuzhiyun 		if ((own & FZA_RING_OWN_MASK) == FZA_RING_OWN_FZA)
853*4882a593Smuzhiyun 			break;
854*4882a593Smuzhiyun 
855*4882a593Smuzhiyun 		if (readl_u(&fp->ring_uns[i].id) == FZA_RING_UNS_RX_OVER) {
856*4882a593Smuzhiyun 			fp->stats.rx_errors++;
857*4882a593Smuzhiyun 			fp->stats.rx_over_errors++;
858*4882a593Smuzhiyun 		}
859*4882a593Smuzhiyun 
860*4882a593Smuzhiyun 		writel_o(FZA_RING_OWN_FZA, &fp->ring_uns[i].own);
861*4882a593Smuzhiyun 		fp->ring_uns_index =
862*4882a593Smuzhiyun 			(fp->ring_uns_index + 1) % FZA_RING_UNS_SIZE;
863*4882a593Smuzhiyun 	}
864*4882a593Smuzhiyun }
865*4882a593Smuzhiyun 
/* Handle a transmit-ring flush request from the adapter: walk both
 * transmit rings once around, returning SMT TX descriptors to the FZA
 * and marking any RMC-owned TX descriptors with the DTP bit so the RMC
 * discards them, then acknowledge completion.  Called from interrupt
 * context.
 */
static void fza_tx_flush(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	u32 own;
	int i;

	/* Clean up the SMT TX ring. */
	i = fp->ring_smt_tx_index;
	do {
		writel_o(FZA_RING_OWN_FZA, &fp->ring_smt_tx[i].own);
		fp->ring_smt_tx_index =
			(fp->ring_smt_tx_index + 1) % fp->ring_smt_tx_size;

	} while (i != fp->ring_smt_tx_index);

	/* Clean up the RMC TX ring. */
	i = fp->ring_rmc_tx_index;
	do {
		own = readl_o(&fp->ring_rmc_tx[i].own);
		if ((own & FZA_RING_OWN_MASK) == FZA_RING_TX_OWN_RMC) {
			u32 rmc = readl_u(&fp->ring_rmc_tx[i].rmc);

			/* Request the RMC to discard this frame. */
			writel_u(rmc | FZA_RING_TX_DTP,
				 &fp->ring_rmc_tx[i].rmc);
		}
		fp->ring_rmc_tx_index =
			(fp->ring_rmc_tx_index + 1) % fp->ring_rmc_tx_size;

	} while (i != fp->ring_rmc_tx_index);

	/* Done. */
	writew_o(FZA_CONTROL_A_FLUSH_DONE, &fp->regs->control_a);
}
899*4882a593Smuzhiyun 
/* Top-level interrupt handler.  Reads and acknowledges the pending
 * event bits (masked by the events we currently accept), then services
 * them one by one.  The servicing order below is deliberate -- do not
 * reorder the event checks.
 */
static irqreturn_t fza_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct fza_private *fp = netdev_priv(dev);
	uint int_event;

	/* Get interrupt events. */
	int_event = readw_o(&fp->regs->int_event) & fp->int_mask;
	if (int_event == 0)
		return IRQ_NONE;

	/* Clear the events. */
	writew_u(int_event, &fp->regs->int_event);

	/* Now handle the events.  The order matters. */

	/* Command finished interrupt. */
	if ((int_event & FZA_EVENT_CMD_DONE) != 0) {
		fp->irq_count_cmd_done++;

		/* Wake anyone sleeping in fza_open()/command submission. */
		spin_lock(&fp->lock);
		fp->cmd_done_flag = 1;
		wake_up(&fp->cmd_done_wait);
		spin_unlock(&fp->lock);
	}

	/* Transmit finished interrupt. */
	if ((int_event & FZA_EVENT_TX_DONE) != 0) {
		fp->irq_count_tx_done++;
		fza_tx(dev);
	}

	/* Host receive interrupt. */
	if ((int_event & FZA_EVENT_RX_POLL) != 0) {
		fp->irq_count_rx_poll++;
		fza_rx(dev);
	}

	/* SMT transmit interrupt. */
	if ((int_event & FZA_EVENT_SMT_TX_POLL) != 0) {
		fp->irq_count_smt_tx_poll++;
		fza_tx_smt(dev);
	}

	/* Transmit ring flush request. */
	if ((int_event & FZA_EVENT_FLUSH_TX) != 0) {
		fp->irq_count_flush_tx++;
		fza_tx_flush(dev);
	}

	/* Link status change interrupt. */
	if ((int_event & FZA_EVENT_LINK_ST_CHG) != 0) {
		uint status;

		fp->irq_count_link_st_chg++;
		status = readw_u(&fp->regs->status);
		if (FZA_STATUS_GET_LINK(status) == FZA_LINK_ON) {
			netif_carrier_on(dev);
			pr_info("%s: link available\n", fp->name);
		} else {
			netif_carrier_off(dev);
			pr_info("%s: link unavailable\n", fp->name);
		}
	}

	/* Unsolicited event interrupt. */
	if ((int_event & FZA_EVENT_UNS_POLL) != 0) {
		fp->irq_count_uns_poll++;
		fza_uns(dev);
	}

	/* State change interrupt. */
	if ((int_event & FZA_EVENT_STATE_CHG) != 0) {
		uint status, state;

		fp->irq_count_state_chg++;

		status = readw_u(&fp->regs->status);
		state = FZA_STATUS_GET_STATE(status);
		pr_debug("%s: state change: %x\n", fp->name, state);
		switch (state) {
		case FZA_STATE_RESET:
			break;

		case FZA_STATE_UNINITIALIZED:
			/* Adapter restarted: reset all ring indices and,
			 * if this is a transition down from a higher state,
			 * kick off reinitialization with an INIT command.
			 */
			netif_carrier_off(dev);
			del_timer_sync(&fp->reset_timer);
			fp->ring_cmd_index = 0;
			fp->ring_uns_index = 0;
			fp->ring_rmc_tx_index = 0;
			fp->ring_rmc_txd_index = 0;
			fp->ring_hst_rx_index = 0;
			fp->ring_smt_tx_index = 0;
			fp->ring_smt_rx_index = 0;
			if (fp->state > state) {
				pr_info("%s: OK\n", fp->name);
				fza_cmd_send(dev, FZA_RING_CMD_INIT);
			}
			break;

		case FZA_STATE_INITIALIZED:
			if (fp->state > state) {
				fza_set_rx_mode(dev);
				fza_cmd_send(dev, FZA_RING_CMD_PARAM);
			}
			break;

		case FZA_STATE_RUNNING:
		case FZA_STATE_MAINTENANCE:
			/* Operational: arm the RX ring and open the queue. */
			fp->state = state;
			fza_rx_init(fp);
			fp->queue_active = 1;
			netif_wake_queue(dev);
			pr_debug("%s: queue woken\n", fp->name);
			break;

		case FZA_STATE_HALTED:
			/* Fatal condition reported: dump diagnostics and
			 * reset the board, rearming the watchdog timer.
			 */
			fp->queue_active = 0;
			netif_stop_queue(dev);
			pr_debug("%s: queue stopped\n", fp->name);
			del_timer_sync(&fp->reset_timer);
			pr_warn("%s: halted, reason: %x\n", fp->name,
				FZA_STATUS_GET_HALT(status));
			fza_regs_dump(fp);
			pr_info("%s: resetting the board...\n", fp->name);
			fza_do_reset(fp);
			fp->timer_state = 0;
			fp->reset_timer.expires = jiffies + 45 * HZ;
			add_timer(&fp->reset_timer);
			break;

		default:
			pr_warn("%s: undefined state: %x\n", fp->name, state);
			break;
		}

		/* Wake anyone waiting for a state transition. */
		spin_lock(&fp->lock);
		fp->state_chg_flag = 1;
		wake_up(&fp->state_chg_wait);
		spin_unlock(&fp->lock);
	}

	return IRQ_HANDLED;
}
1044*4882a593Smuzhiyun 
/* Watchdog timer for board resets; a two-phase state machine driven by
 * fp->timer_state.  Phase 0: the expected RESET completion did not
 * happen in time, so assert the hardware reset and re-check in 1s.
 * Phase 1: deassert the reset, re-enable interrupts, and allow up to
 * 45s for the board to come back.
 */
static void fza_reset_timer(struct timer_list *t)
{
	struct fza_private *fp = from_timer(fp, t, reset_timer);

	if (!fp->timer_state) {
		pr_err("%s: RESET timed out!\n", fp->name);
		pr_info("%s: trying harder...\n", fp->name);

		/* Assert the board reset. */
		writew_o(FZA_RESET_INIT, &fp->regs->reset);
		readw_o(&fp->regs->reset);		/* Synchronize. */

		fp->timer_state = 1;
		fp->reset_timer.expires = jiffies + HZ;
	} else {
		/* Clear the board reset. */
		writew_u(FZA_RESET_CLR, &fp->regs->reset);

		/* Enable all interrupt events we handle. */
		writew_o(fp->int_mask, &fp->regs->int_mask);
		readw_o(&fp->regs->int_mask);		/* Synchronize. */

		fp->timer_state = 0;
		fp->reset_timer.expires = jiffies + 45 * HZ;
	}
	/* Rearm for the next phase/timeout. */
	add_timer(&fp->reset_timer);
}
1072*4882a593Smuzhiyun 
/* The FZA does not support changing the MAC address at run time. */
static int fza_set_mac_address(struct net_device *dev, void *addr)
{
	return -EOPNOTSUPP;
}
1077*4882a593Smuzhiyun 
/* ndo_start_xmit handler.  Prepends a 3-octet Packet Request Header
 * (PRH) decoded from the frame's FC octet, then hands the frame to the
 * RMC transmit ring.  SMT TX interrupts are masked around the queueing
 * to keep the RMC ring consistent.  The skb is always consumed.
 */
static netdev_tx_t fza_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	unsigned int old_mask, new_mask;
	int ret;
	u8 fc;

	skb_push(skb, 3);			/* Make room for PRH. */

	/* Decode FC to set PRH. */
	fc = skb->data[3];
	skb->data[0] = 0;
	skb->data[1] = 0;
	skb->data[2] = FZA_PRH2_NORMAL;
	if ((fc & FDDI_FC_K_CLASS_MASK) == FDDI_FC_K_CLASS_SYNC)
		skb->data[0] |= FZA_PRH0_FRAME_SYNC;
	switch (fc & FDDI_FC_K_FORMAT_MASK) {
	case FDDI_FC_K_FORMAT_MANAGEMENT:
		if ((fc & FDDI_FC_K_CONTROL_MASK) == 0) {
			/* Token. */
			skb->data[0] |= FZA_PRH0_TKN_TYPE_IMM;
			skb->data[1] |= FZA_PRH1_TKN_SEND_NONE;
		} else {
			/* SMT or MAC. */
			skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
			skb->data[1] |= FZA_PRH1_TKN_SEND_UNR;
		}
		skb->data[1] |= FZA_PRH1_CRC_NORMAL;
		break;
	case FDDI_FC_K_FORMAT_LLC:
	case FDDI_FC_K_FORMAT_FUTURE:
		skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
		skb->data[1] |= FZA_PRH1_CRC_NORMAL | FZA_PRH1_TKN_SEND_UNR;
		break;
	case FDDI_FC_K_FORMAT_IMPLEMENTOR:
		skb->data[0] |= FZA_PRH0_TKN_TYPE_UNR;
		skb->data[1] |= FZA_PRH1_TKN_SEND_ORIG;
		break;
	}

	/* SMT transmit interrupts may sneak frames into the RMC
	 * transmit ring.  We disable them while queueing a frame
	 * to maintain consistency.
	 */
	old_mask = fp->int_mask;
	new_mask = old_mask & ~FZA_MASK_SMT_TX_POLL;
	writew_u(new_mask, &fp->regs->int_mask);
	readw_o(&fp->regs->int_mask);			/* Synchronize. */
	fp->int_mask = new_mask;
	ret = fza_do_xmit((union fza_buffer_txp)
			  { .data_ptr = (struct fza_buffer_tx *)skb->data },
			  skb->len, dev, 0);
	fp->int_mask = old_mask;
	writew_u(fp->int_mask, &fp->regs->int_mask);

	if (ret) {
		/* Probably an SMT packet filled the remaining space,
		 * so just stop the queue, but don't report it as an error.
		 */
		netif_stop_queue(dev);
		pr_debug("%s: queue stopped\n", fp->name);
		fp->stats.tx_dropped++;
	}

	/* The frame has been copied to the ring (or dropped); either way
	 * the skb is ours to free.  NOTE(review): on failure 'ret' is
	 * nonzero and returned as netdev_tx_t even though the skb has
	 * been consumed -- confirm this matches the intended contract,
	 * since NETDEV_TX_BUSY would imply the skb is requeued by the
	 * stack.
	 */
	dev_kfree_skb(skb);

	return ret;
}
1146*4882a593Smuzhiyun 
/* ndo_open handler.  Allocates and maps the 512-byte-aligned RX
 * buffers, initializes the adapter (INIT), programs the receive mode,
 * then issues and waits for the PARAM command to complete.  Returns 0
 * on success or a negative errno.
 */
static int fza_open(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	struct fza_ring_cmd __iomem *ring;
	struct sk_buff *skb;
	unsigned long flags;
	dma_addr_t dma;
	int ret, i;
	u32 stat;
	long t;

	for (i = 0; i < FZA_RING_RX_SIZE; i++) {
		/* We have to 512-byte-align RX buffers... */
		skb = fza_alloc_skb(dev, FZA_RX_BUFFER_SIZE + 511);
		if (skb) {
			fza_skb_align(skb, 512);
			dma = dma_map_single(fp->bdev, skb->data,
					     FZA_RX_BUFFER_SIZE,
					     DMA_FROM_DEVICE);
			if (dma_mapping_error(fp->bdev, dma)) {
				dev_kfree_skb(skb);
				skb = NULL;
			}
		}
		if (!skb) {
			/* Unwind the buffers allocated so far. */
			for (--i; i >= 0; i--) {
				dma_unmap_single(fp->bdev, fp->rx_dma[i],
						 FZA_RX_BUFFER_SIZE,
						 DMA_FROM_DEVICE);
				dev_kfree_skb(fp->rx_skbuff[i]);
				fp->rx_dma[i] = 0;
				fp->rx_skbuff[i] = NULL;
			}
			return -ENOMEM;
		}
		fp->rx_skbuff[i] = skb;
		fp->rx_dma[i] = dma;
	}

	/* NOTE(review): the error returns below leave the RX buffers
	 * allocated and mapped (ndo_stop is not called after a failed
	 * ndo_open), which looks like a leak -- though freeing them here
	 * may be unsafe once the adapter has been handed the rings.
	 * Worth confirming against the hardware state machine.
	 */
	ret = fza_init_send(dev, NULL);
	if (ret != 0)
		return ret;

	/* Purger and Beacon multicasts need to be supplied before PARAM. */
	fza_set_rx_mode(dev);

	spin_lock_irqsave(&fp->lock, flags);
	fp->cmd_done_flag = 0;
	ring = fza_cmd_send(dev, FZA_RING_CMD_PARAM);
	spin_unlock_irqrestore(&fp->lock, flags);
	if (!ring)
		return -ENOBUFS;

	/* Wait for the interrupt handler to flag command completion. */
	t = wait_event_timeout(fp->cmd_done_wait, fp->cmd_done_flag, 3 * HZ);
	if (fp->cmd_done_flag == 0) {
		pr_err("%s: PARAM command timed out!, state %x\n", fp->name,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	stat = readl_u(&ring->stat);
	if (stat != FZA_RING_STAT_SUCCESS) {
		pr_err("%s: PARAM command failed!, status %02x, state %x\n",
		       fp->name, stat,
		       FZA_STATUS_GET_STATE(readw_u(&fp->regs->status)));
		return -EIO;
	}
	pr_debug("%s: PARAM: %lums elapsed\n", fp->name,
		 (3 * HZ - t) * 1000 / HZ);

	return 0;
}
1218*4882a593Smuzhiyun 
/* ndo_stop handler.  Stops the transmit queue, shuts the interface
 * down (SHUT) and waits for the state change to UNINITIALIZED, then
 * unmaps and frees all RX buffers.  Returns 0 on success or -EIO if
 * the shutdown handshake fails.
 */
static int fza_close(struct net_device *dev)
{
	struct fza_private *fp = netdev_priv(dev);
	unsigned long flags;
	uint state;
	long t;
	int i;

	netif_stop_queue(dev);
	pr_debug("%s: queue stopped\n", fp->name);

	del_timer_sync(&fp->reset_timer);
	spin_lock_irqsave(&fp->lock, flags);
	fp->state = FZA_STATE_UNINITIALIZED;
	fp->state_chg_flag = 0;
	/* Shut the interface down. */
	writew_o(FZA_CONTROL_A_SHUT, &fp->regs->control_a);
	readw_o(&fp->regs->control_a);			/* Synchronize. */
	spin_unlock_irqrestore(&fp->lock, flags);

	/* DEC says SHUT needs up to 10 seconds to complete. */
	t = wait_event_timeout(fp->state_chg_wait, fp->state_chg_flag,
			       15 * HZ);
	state = FZA_STATUS_GET_STATE(readw_o(&fp->regs->status));
	if (fp->state_chg_flag == 0) {
		pr_err("%s: SHUT timed out!, state %x\n", fp->name, state);
		return -EIO;
	}
	if (state != FZA_STATE_UNINITIALIZED) {
		pr_err("%s: SHUT failed!, state %x\n", fp->name, state);
		return -EIO;
	}
	pr_debug("%s: SHUT: %lums elapsed\n", fp->name,
		 (15 * HZ - t) * 1000 / HZ);

	/* The adapter is down; it is now safe to release the RX buffers. */
	for (i = 0; i < FZA_RING_RX_SIZE; i++)
		if (fp->rx_skbuff[i]) {
			dma_unmap_single(fp->bdev, fp->rx_dma[i],
					 FZA_RX_BUFFER_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb(fp->rx_skbuff[i]);
			fp->rx_dma[i] = 0;
			fp->rx_skbuff[i] = NULL;
		}

	return 0;
}
1265*4882a593Smuzhiyun 
fza_get_stats(struct net_device * dev)1266*4882a593Smuzhiyun static struct net_device_stats *fza_get_stats(struct net_device *dev)
1267*4882a593Smuzhiyun {
1268*4882a593Smuzhiyun 	struct fza_private *fp = netdev_priv(dev);
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	return &fp->stats;
1271*4882a593Smuzhiyun }
1272*4882a593Smuzhiyun 
/* fza_probe - attach the driver to one DEC FDDIcontroller 700/700-C.
 * @bdev: generic device embedded in the TURBOchannel device to probe.
 *
 * Maps the option card's MMIO window, forces the adapter into a known
 * quiescent state, resets it, retrieves the initialization parameters
 * (hardware address, ring locations and sizes, ROM/firmware/RMC
 * revisions) via an INIT command, and registers the resulting FDDI
 * network device.  On success a reference to @bdev is taken (dropped
 * again in fza_remove).
 *
 * Return: 0 on success or a negative errno on failure; on failure all
 * resources acquired so far are released via the goto-unwind labels.
 */
static int fza_probe(struct device *bdev)
{
	static const struct net_device_ops netdev_ops = {
		.ndo_open = fza_open,
		.ndo_stop = fza_close,
		.ndo_start_xmit = fza_start_xmit,
		.ndo_set_rx_mode = fza_set_rx_mode,
		.ndo_set_mac_address = fza_set_mac_address,
		.ndo_get_stats = fza_get_stats,
	};
	static int version_printed;	/* print the version banner only once */
	char rom_rev[4], fw_rev[4], rmc_rev[4];
	struct tc_dev *tdev = to_tc_dev(bdev);
	struct fza_cmd_init __iomem *init;
	resource_size_t start, len;
	struct net_device *dev;
	struct fza_private *fp;
	uint smt_ver, pmd_type;
	void __iomem *mmio;
	uint hw_addr[2];
	int ret, i;

	if (!version_printed) {
		pr_info("%s", version);
		version_printed = 1;
	}

	dev = alloc_fddidev(sizeof(*fp));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, bdev);

	fp = netdev_priv(dev);
	dev_set_drvdata(bdev, dev);

	fp->bdev = bdev;
	/* Use the bus device name for messages until the netdev is
	 * registered; switched to dev->name at the end of this function.
	 */
	fp->name = dev_name(bdev);

	/* Request the I/O MEM resource. */
	start = tdev->resource.start;
	len = tdev->resource.end - start + 1;
	if (!request_mem_region(start, len, dev_name(bdev))) {
		pr_err("%s: cannot reserve MMIO region\n", fp->name);
		ret = -EBUSY;
		goto err_out_kfree;
	}

	/* MMIO mapping setup. */
	mmio = ioremap(start, len);
	if (!mmio) {
		pr_err("%s: cannot map MMIO\n", fp->name);
		ret = -ENOMEM;
		goto err_out_resource;
	}

	/* Initialize the new device structure. */
	/* Sanitize the module's "loopback" parameter; anything out of
	 * range falls back to normal (no-loopback) operation.
	 */
	switch (loopback) {
	case FZA_LOOP_NORMAL:
	case FZA_LOOP_INTERN:
	case FZA_LOOP_EXTERN:
		break;
	default:
		loopback = FZA_LOOP_NORMAL;
	}

	fp->mmio = mmio;
	dev->irq = tdev->interrupt;

	pr_info("%s: DEC FDDIcontroller 700 or 700-C at 0x%08llx, irq %d\n",
		fp->name, (long long)tdev->resource.start, dev->irq);
	pr_debug("%s: mapped at: 0x%p\n", fp->name, mmio);

	/* The command and unsolicited-event rings live at fixed offsets
	 * within the MMIO window; the remaining rings are reported by
	 * the firmware in the INIT response below.
	 */
	fp->regs = mmio + FZA_REG_BASE;
	fp->ring_cmd = mmio + FZA_RING_CMD;
	fp->ring_uns = mmio + FZA_RING_UNS;

	init_waitqueue_head(&fp->state_chg_wait);
	init_waitqueue_head(&fp->cmd_done_wait);
	spin_lock_init(&fp->lock);
	fp->int_mask = FZA_MASK_NORMAL;

	timer_setup(&fp->reset_timer, fza_reset_timer, 0);

	/* Sanitize the board. */
	fza_regs_dump(fp);
	fza_do_shutdown(fp);

	ret = request_irq(dev->irq, fza_interrupt, IRQF_SHARED, fp->name, dev);
	if (ret != 0) {
		pr_err("%s: unable to get IRQ %d!\n", fp->name, dev->irq);
		goto err_out_map;
	}

	/* Enable the driver mode. */
	writew_o(FZA_CONTROL_B_DRIVER, &fp->regs->control_b);

	/* For some reason transmit done interrupts can trigger during
	 * reset.  This avoids a division error in the handler.
	 */
	fp->ring_rmc_tx_size = FZA_RING_TX_SIZE;

	ret = fza_reset(fp);
	if (ret != 0)
		goto err_out_irq;

	/* Issue the INIT command; "init" points at the firmware's
	 * response buffer in MMIO space, read below with readl_u/fza_reads.
	 */
	ret = fza_init_send(dev, &init);
	if (ret != 0)
		goto err_out_irq;

	fza_reads(&init->hw_addr, &hw_addr, sizeof(hw_addr));
	memcpy(dev->dev_addr, &hw_addr, FDDI_K_ALEN);

	/* Fetch the fixed-width revision strings and strip their
	 * trailing space padding (they are not NUL-terminated).
	 */
	fza_reads(&init->rom_rev, &rom_rev, sizeof(rom_rev));
	fza_reads(&init->fw_rev, &fw_rev, sizeof(fw_rev));
	fza_reads(&init->rmc_rev, &rmc_rev, sizeof(rmc_rev));
	for (i = 3; i >= 0 && rom_rev[i] == ' '; i--)
		rom_rev[i] = 0;
	for (i = 3; i >= 0 && fw_rev[i] == ' '; i--)
		fw_rev[i] = 0;
	for (i = 3; i >= 0 && rmc_rev[i] == ' '; i--)
		rmc_rev[i] = 0;

	/* Record the firmware-assigned ring locations (offsets relative
	 * to the MMIO base) and sizes.
	 */
	fp->ring_rmc_tx = mmio + readl_u(&init->rmc_tx);
	fp->ring_rmc_tx_size = readl_u(&init->rmc_tx_size);
	fp->ring_hst_rx = mmio + readl_u(&init->hst_rx);
	fp->ring_hst_rx_size = readl_u(&init->hst_rx_size);
	fp->ring_smt_tx = mmio + readl_u(&init->smt_tx);
	fp->ring_smt_tx_size = readl_u(&init->smt_tx_size);
	fp->ring_smt_rx = mmio + readl_u(&init->smt_rx);
	fp->ring_smt_rx_size = readl_u(&init->smt_rx_size);

	fp->buffer_tx = mmio + FZA_TX_BUFFER_ADDR(readl_u(&init->rmc_tx));

	/* Cache the firmware's default FDDI timer values and station
	 * parameters for later use.
	 */
	fp->t_max = readl_u(&init->def_t_max);
	fp->t_req = readl_u(&init->def_t_req);
	fp->tvx = readl_u(&init->def_tvx);
	fp->lem_threshold = readl_u(&init->lem_threshold);
	fza_reads(&init->def_station_id, &fp->station_id,
		  sizeof(fp->station_id));
	fp->rtoken_timeout = readl_u(&init->rtoken_timeout);
	fp->ring_purger = readl_u(&init->ring_purger);

	smt_ver = readl_u(&init->smt_ver);
	pmd_type = readl_u(&init->pmd_type);

	pr_debug("%s: INIT parameters:\n", fp->name);
	pr_debug("        tx_mode: %u\n", readl_u(&init->tx_mode));
	pr_debug("    hst_rx_size: %u\n", readl_u(&init->hst_rx_size));
	pr_debug("        rmc_rev: %.4s\n", rmc_rev);
	pr_debug("        rom_rev: %.4s\n", rom_rev);
	pr_debug("         fw_rev: %.4s\n", fw_rev);
	pr_debug("       mop_type: %u\n", readl_u(&init->mop_type));
	pr_debug("         hst_rx: 0x%08x\n", readl_u(&init->hst_rx));
	pr_debug("         rmc_tx: 0x%08x\n", readl_u(&init->rmc_tx));
	pr_debug("    rmc_tx_size: %u\n", readl_u(&init->rmc_tx_size));
	pr_debug("         smt_tx: 0x%08x\n", readl_u(&init->smt_tx));
	pr_debug("    smt_tx_size: %u\n", readl_u(&init->smt_tx_size));
	pr_debug("         smt_rx: 0x%08x\n", readl_u(&init->smt_rx));
	pr_debug("    smt_rx_size: %u\n", readl_u(&init->smt_rx_size));
	/* TC systems are always LE, so don't bother swapping. */
	pr_debug("        hw_addr: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		 (readl_u(&init->hw_addr[0]) >> 0) & 0xff,
		 (readl_u(&init->hw_addr[0]) >> 8) & 0xff,
		 (readl_u(&init->hw_addr[0]) >> 16) & 0xff,
		 (readl_u(&init->hw_addr[0]) >> 24) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 0) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 8) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 16) & 0xff,
		 (readl_u(&init->hw_addr[1]) >> 24) & 0xff);
	pr_debug("      def_t_req: %u\n", readl_u(&init->def_t_req));
	pr_debug("        def_tvx: %u\n", readl_u(&init->def_tvx));
	pr_debug("      def_t_max: %u\n", readl_u(&init->def_t_max));
	pr_debug("  lem_threshold: %u\n", readl_u(&init->lem_threshold));
	/* Don't bother swapping, see above. */
	pr_debug(" def_station_id: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
		 (readl_u(&init->def_station_id[0]) >> 0) & 0xff,
		 (readl_u(&init->def_station_id[0]) >> 8) & 0xff,
		 (readl_u(&init->def_station_id[0]) >> 16) & 0xff,
		 (readl_u(&init->def_station_id[0]) >> 24) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 0) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 8) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 16) & 0xff,
		 (readl_u(&init->def_station_id[1]) >> 24) & 0xff);
	pr_debug("   pmd_type_alt: %u\n", readl_u(&init->pmd_type_alt));
	pr_debug("        smt_ver: %u\n", readl_u(&init->smt_ver));
	pr_debug(" rtoken_timeout: %u\n", readl_u(&init->rtoken_timeout));
	pr_debug("    ring_purger: %u\n", readl_u(&init->ring_purger));
	pr_debug("    smt_ver_max: %u\n", readl_u(&init->smt_ver_max));
	pr_debug("    smt_ver_min: %u\n", readl_u(&init->smt_ver_min));
	pr_debug("       pmd_type: %u\n", readl_u(&init->pmd_type));

	pr_info("%s: model %s, address %pMF\n",
		fp->name,
		pmd_type == FZA_PMD_TYPE_TW ?
			"700-C (DEFZA-CA), ThinWire PMD selected" :
			pmd_type == FZA_PMD_TYPE_STP ?
				"700-C (DEFZA-CA), STP PMD selected" :
				"700 (DEFZA-AA), MMF PMD",
		dev->dev_addr);
	pr_info("%s: ROM rev. %.4s, firmware rev. %.4s, RMC rev. %.4s, "
		"SMT ver. %u\n", fp->name, rom_rev, fw_rev, rmc_rev, smt_ver);

	/* Now that we fetched initial parameters just shut the interface
	 * until opened.
	 */
	ret = fza_close(dev);
	if (ret != 0)
		goto err_out_irq;

	/* The FZA-specific entries in the device structure. */
	dev->netdev_ops = &netdev_ops;

	ret = register_netdev(dev);
	if (ret != 0)
		goto err_out_irq;

	pr_info("%s: registered as %s\n", fp->name, dev->name);
	/* From here on report messages under the netdev name. */
	fp->name = (const char *)dev->name;

	/* Balanced by put_device() in fza_remove(). */
	get_device(bdev);
	return 0;

err_out_irq:
	del_timer_sync(&fp->reset_timer);
	fza_do_shutdown(fp);
	free_irq(dev->irq, dev);

err_out_map:
	iounmap(mmio);

err_out_resource:
	release_mem_region(start, len);

err_out_kfree:
	pr_err("%s: initialization failure, aborting!\n", fp->name);
	free_netdev(dev);
	return ret;
}
1511*4882a593Smuzhiyun 
fza_remove(struct device * bdev)1512*4882a593Smuzhiyun static int fza_remove(struct device *bdev)
1513*4882a593Smuzhiyun {
1514*4882a593Smuzhiyun 	struct net_device *dev = dev_get_drvdata(bdev);
1515*4882a593Smuzhiyun 	struct fza_private *fp = netdev_priv(dev);
1516*4882a593Smuzhiyun 	struct tc_dev *tdev = to_tc_dev(bdev);
1517*4882a593Smuzhiyun 	resource_size_t start, len;
1518*4882a593Smuzhiyun 
1519*4882a593Smuzhiyun 	put_device(bdev);
1520*4882a593Smuzhiyun 
1521*4882a593Smuzhiyun 	unregister_netdev(dev);
1522*4882a593Smuzhiyun 
1523*4882a593Smuzhiyun 	del_timer_sync(&fp->reset_timer);
1524*4882a593Smuzhiyun 	fza_do_shutdown(fp);
1525*4882a593Smuzhiyun 	free_irq(dev->irq, dev);
1526*4882a593Smuzhiyun 
1527*4882a593Smuzhiyun 	iounmap(fp->mmio);
1528*4882a593Smuzhiyun 
1529*4882a593Smuzhiyun 	start = tdev->resource.start;
1530*4882a593Smuzhiyun 	len = tdev->resource.end - start + 1;
1531*4882a593Smuzhiyun 	release_mem_region(start, len);
1532*4882a593Smuzhiyun 
1533*4882a593Smuzhiyun 	free_netdev(dev);
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	return 0;
1536*4882a593Smuzhiyun }
1537*4882a593Smuzhiyun 
/* TURBOchannel IDs this driver binds to; the vendor and module name
 * strings are fixed-width and space-padded as reported by the option
 * ROM.  Exported for module autoloading via MODULE_DEVICE_TABLE.
 */
static struct tc_device_id const fza_tc_table[] = {
	{ "DEC     ", "PMAF-AA " },
	{ }
};
MODULE_DEVICE_TABLE(tc, fza_tc_table);
1543*4882a593Smuzhiyun 
/* TURBOchannel driver glue: matches fza_tc_table entries and routes
 * bus probe/remove events to fza_probe()/fza_remove().
 */
static struct tc_driver fza_driver = {
	.id_table	= fza_tc_table,
	.driver		= {
		.name	= "defza",
		.bus	= &tc_bus_type,
		.probe	= fza_probe,
		.remove	= fza_remove,
	},
};
1553*4882a593Smuzhiyun 
/* Module entry point: register the driver with the TURBOchannel bus. */
static int fza_init(void)
{
	return tc_register_driver(&fza_driver);
}
1558*4882a593Smuzhiyun 
/* Module exit point: unregister the driver from the TURBOchannel bus. */
static void fza_exit(void)
{
	tc_unregister_driver(&fza_driver);
}
1563*4882a593Smuzhiyun 
/* Hook the registration helpers into module load/unload. */
module_init(fza_init);
module_exit(fza_exit);
1566