/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
/*
	Written/copyright 1999-2001 by Donald Becker.
	Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
	Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
	Portions copyright 2004 Harald Welte <laforge@gnumonks.org>

	This software may be used and distributed according to the terms of
	the GNU General Public License (GPL), incorporated herein by reference.
	Drivers based on or derived from this code fall under the GPL and must
	retain the authorship, copyright and license notice.  This file is not
	a complete program and may only be used when the entire operating
	system is licensed under the GPL.  A license under other terms may be
	available.  Contact the original author for details.

	The original author may be reached as becker@scyld.com, or at
	Scyld Computing Corporation
	410 Severn Ave., Suite 210
	Annapolis MD 21403

	Support information and updates available at
	http://www.scyld.com/network/netsemi.html
	[link no longer provides useful info -jgarzik]


	TODO:
	* big endian support with CFG:BEM instead of cpu_to_le32
*/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include <linux/rtnetlink.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/bitops.h>
#include <linux/prefetch.h>
#include <asm/processor.h>	/* Processor type for cache alignment. */
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/uaccess.h>

#define DRV_NAME	"natsemi"
#define DRV_VERSION	"2.1"
#define DRV_RELDATE	"Sept 11, 2006"

#define RX_OFFSET	2

/* Updated to recommendations in pci-skeleton v2.03. */

/* The user-configurable values.
   These may be modified when a driver module is loaded. */

#define NATSEMI_DEF_MSG		(NETIF_MSG_DRV		| \
				 NETIF_MSG_LINK		| \
				 NETIF_MSG_WOL		| \
				 NETIF_MSG_RX_ERR	| \
				 NETIF_MSG_TX_ERR)
static int debug = -1;

static int mtu;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   This chip uses a 512 element hash table based on the Ethernet CRC.  */
static const int multicast_filter_limit = 100;
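
/*
 * A sketch of how the limit is applied (cf. __set_rx_mode() later in this
 * file): past the limit, hashing is abandoned in favor of accepting all
 * multicast frames, roughly:
 *
 *	if ((dev->flags & IFF_ALLMULTI) ||
 *	    netdev_mc_count(dev) > multicast_filter_limit)
 *		rx_mode |= AcceptAllMulticast;
 */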

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak;

static int dspcfg_workaround = 1;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'.
*/
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS];
static int full_duplex[MAX_UNITS];

/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10 /* Limit ring entries actually used, min 4. */
#define RX_RING_SIZE	32
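
/*
 * A minimal sketch of the index arithmetic this enables: the ring indices
 * grow without bound and are reduced modulo the ring size on use, which
 * the compiler turns into a simple AND for power-of-two sizes:
 *
 *	unsigned int entry = np->cur_tx % TX_RING_SIZE;
 *	np->tx_skbuff[entry] = skb;
 */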

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define NATSEMI_HW_TIMEOUT	400
#define NATSEMI_TIMER_FREQ	(5*HZ)
#define NATSEMI_PG0_NREGS	64
#define NATSEMI_RFDR_NREGS	8
#define NATSEMI_PG1_NREGS	4
#define NATSEMI_NREGS		(NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
				 NATSEMI_PG1_NREGS)
#define NATSEMI_REGS_VER	1 /* v1 added RFDR registers */
#define NATSEMI_REGS_SIZE	(NATSEMI_NREGS * sizeof(u32))

/* Buffer sizes:
 * The nic writes 32-bit values, even if the upper bytes of
 * a 32-bit value are beyond the end of the buffer.
 */
#define NATSEMI_HEADERS		22	/* 2*mac,type,vlan,crc */
#define NATSEMI_PADDING		16	/* 2 bytes should be sufficient */
#define NATSEMI_LONGPKT		1518	/* limit for normal packets */
#define NATSEMI_RX_LIMIT	2046	/* maximum supported by hardware */
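
/*
 * A sketch of how these constants combine (the actual computation lives in
 * set_bufsize() elsewhere in this driver): an Rx buffer must hold the MTU
 * plus the link-level overhead counted by NATSEMI_HEADERS, roughly
 *
 *	np->rx_buf_sz = max(dev->mtu, (unsigned int)ETH_DATA_LEN) +
 *			NATSEMI_HEADERS;
 */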

/* These identify the driver base version and may not be removed. */
static const char version[] =
  KERN_INFO DRV_NAME " dp8381x driver, version "
      DRV_VERSION ", " DRV_RELDATE "\n"
  "  originally by Donald Becker <becker@scyld.com>\n"
  "  2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n";

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
MODULE_LICENSE("GPL");

module_param(mtu, int, 0);
module_param(debug, int, 0);
module_param(rx_copybreak, int, 0);
module_param(dspcfg_workaround, int, 0);
module_param_array(options, int, NULL, 0);
module_param_array(full_duplex, int, NULL, 0);
MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
MODULE_PARM_DESC(debug, "DP8381x default debug level");
MODULE_PARM_DESC(rx_copybreak,
	"DP8381x copy breakpoint for copy-only-tiny-frames");
MODULE_PARM_DESC(dspcfg_workaround, "DP8381x: control DspCfg workaround");
MODULE_PARM_DESC(options,
	"DP8381x: Bits 0-3: media type, bit 17: full duplex");
MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
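
/*
 * Example (hypothetical values) of setting the module parameters declared
 * above at load time:
 *
 *	modprobe natsemi debug=3 rx_copybreak=256 full_duplex=1,0
 */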

/*
				Theory of Operation

I. Board Compatibility

This driver is designed for the National Semiconductor DP83815 PCI Ethernet
NIC.  It also works with other chips in the DP83810 series.

II. Board-specific settings

This driver requires the PCI interrupt line to be valid.
It honors the EEPROM-set values.

III. Driver operation

IIIa. Ring buffers

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
The NatSemi design uses a 'next descriptor' pointer that the driver forms
into a list.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack.  Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames.  New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets.  When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine.  Copying also preloads the cache, which is
most useful with small frames.

A subtle aspect of the operation is that unaligned buffers are not permitted
by the hardware.  Thus the IP header at offset 14 in an ethernet frame isn't
longword aligned for further processing.  When copying, frames are put into
the skbuff at an offset of "+2", 16-byte aligning the IP header.

IIId. Synchronization

Most operations are synchronized on the np->lock irq spinlock, except the
receive and transmit paths, which are synchronised using a combination of
hardware descriptor ownership, interrupt disabling and NAPI poll scheduling.

IVb. References

http://www.scyld.com/expert/100mbps.html
http://www.scyld.com/expert/NWay.html
The datasheet is available from:
http://www.national.com/pf/DP/DP83815.html

IVc. Errata

None characterised.
*/
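
/*
 * A simplified sketch of the copy-vs-pass-up decision described in IIIb/c
 * above, following the netdev_rx() logic later in this file:
 *
 *	if (pkt_len < rx_copybreak) {
 *		skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET);
 *		skb_reserve(skb, RX_OFFSET);	// "+2": align the IP header
 *		skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data,
 *					pkt_len);
 *	} else {
 *		skb = np->rx_skbuff[entry];	// pass the buffer up whole
 *		np->rx_skbuff[entry] = NULL;	// refill_rx() replaces it
 *	}
 */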


/*
 * Support for fibre connections on Am79C874:
 * This phy needs a special setup when connected to a fibre cable.
 * http://www.amd.com/files/connectivitysolutions/networking/archivednetworking/22235.pdf
 */
#define PHYID_AM79C874	0x0022561b

enum {
	MII_MCTRL	= 0x15,		/* mode control register */
	MII_FX_SEL	= 0x0001,	/* 100BASE-FX (fiber) */
	MII_EN_SCRM	= 0x0004,	/* enable scrambler (tp) */
};
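
/*
 * A sketch of how these bits are used (cf. init_phy_fixup() below): on an
 * Am79C874, fibre operation selects 100BASE-FX while twisted pair instead
 * enables the scrambler, roughly:
 *
 *	tmp = mdio_read(dev, MII_MCTRL) & ~(MII_FX_SEL | MII_EN_SCRM);
 *	tmp |= (dev->if_port == PORT_FIBRE) ? MII_FX_SEL : MII_EN_SCRM;
 *	mdio_write(dev, MII_MCTRL, tmp);
 */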

enum {
	NATSEMI_FLAG_IGNORE_PHY		= 0x1,
};

/* array of board data directly indexed by pci_tbl[x].driver_data */
static struct {
	const char *name;
	unsigned long flags;
	unsigned int eeprom_size;
} natsemi_pci_info[] = {
	{ "Aculab E1/T1 PMXc cPCI carrier card", NATSEMI_FLAG_IGNORE_PHY, 128 },
	{ "NatSemi DP8381[56]", 0, 24 },
};

static const struct pci_device_id natsemi_pci_tbl[] = {
	{ PCI_VENDOR_ID_NS, 0x0020, 0x12d9,     0x000c,     0, 0, 0 },
	{ PCI_VENDOR_ID_NS, 0x0020, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ }	/* terminate list */
};
MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device.
*/
enum register_offsets {
	ChipCmd			= 0x00,
	ChipConfig		= 0x04,
	EECtrl			= 0x08,
	PCIBusCfg		= 0x0C,
	IntrStatus		= 0x10,
	IntrMask		= 0x14,
	IntrEnable		= 0x18,
	IntrHoldoff		= 0x1C, /* DP83816 only */
	TxRingPtr		= 0x20,
	TxConfig		= 0x24,
	RxRingPtr		= 0x30,
	RxConfig		= 0x34,
	ClkRun			= 0x3C,
	WOLCmd			= 0x40,
	PauseCmd		= 0x44,
	RxFilterAddr		= 0x48,
	RxFilterData		= 0x4C,
	BootRomAddr		= 0x50,
	BootRomData		= 0x54,
	SiliconRev		= 0x58,
	StatsCtrl		= 0x5C,
	StatsData		= 0x60,
	RxPktErrs		= 0x60,
	RxMissed		= 0x68,
	RxCRCErrs		= 0x64,
	BasicControl		= 0x80,
	BasicStatus		= 0x84,
	AnegAdv			= 0x90,
	AnegPeer		= 0x94,
	PhyStatus		= 0xC0,
	MIntrCtrl		= 0xC4,
	MIntrStatus		= 0xC8,
	PhyCtrl			= 0xE4,

	/* These are from the spec, around page 78... on a separate table.
	 * The meaning of these registers depends on the value of PGSEL. */
	PGSEL			= 0xCC,
	PMDCSR			= 0xE4,
	TSTDAT			= 0xFC,
	DSPCFG			= 0xF4,
	SDCFG			= 0xF8
};
/* the values for the 'magic' registers above (PGSEL=1) */
#define PMDCSR_VAL	0x189c	/* enable preferred adaptation circuitry */
#define TSTDAT_VAL	0x0
#define DSPCFG_VAL	0x5040
#define SDCFG_VAL	0x008c	/* set voltage thresholds for Signal Detect */
#define DSPCFG_LOCK	0x20	/* coefficient lock bit in DSPCFG */
#define DSPCFG_COEF	0x1000	/* see coefficient (in TSTDAT) bit in DSPCFG */
#define TSTDAT_FIXED	0xe8	/* magic number for bad coefficients */

/* misc PCI space registers */
enum pci_register_offsets {
	PCIPM			= 0x44,
};

enum ChipCmd_bits {
	ChipReset		= 0x100,
	RxReset			= 0x20,
	TxReset			= 0x10,
	RxOff			= 0x08,
	RxOn			= 0x04,
	TxOff			= 0x02,
	TxOn			= 0x01,
};

enum ChipConfig_bits {
	CfgPhyDis		= 0x200,
	CfgPhyRst		= 0x400,
	CfgExtPhy		= 0x1000,
	CfgAnegEnable		= 0x2000,
	CfgAneg100		= 0x4000,
	CfgAnegFull		= 0x8000,
	CfgAnegDone		= 0x8000000,
	CfgFullDuplex		= 0x20000000,
	CfgSpeed100		= 0x40000000,
	CfgLink			= 0x80000000,
};

enum EECtrl_bits {
	EE_ShiftClk		= 0x04,
	EE_DataIn		= 0x01,
	EE_ChipSelect		= 0x08,
	EE_DataOut		= 0x02,
	MII_Data		= 0x10,
	MII_Write		= 0x20,
	MII_ShiftClk		= 0x40,
};

enum PCIBusCfg_bits {
	EepromReload		= 0x4,
};

/* Bits in the interrupt status/mask registers. */
enum IntrStatus_bits {
	IntrRxDone		= 0x0001,
	IntrRxIntr		= 0x0002,
	IntrRxErr		= 0x0004,
	IntrRxEarly		= 0x0008,
	IntrRxIdle		= 0x0010,
	IntrRxOverrun		= 0x0020,
	IntrTxDone		= 0x0040,
	IntrTxIntr		= 0x0080,
	IntrTxErr		= 0x0100,
	IntrTxIdle		= 0x0200,
	IntrTxUnderrun		= 0x0400,
	StatsMax		= 0x0800,
	SWInt			= 0x1000,
	WOLPkt			= 0x2000,
	LinkChange		= 0x4000,
	IntrHighBits		= 0x8000,
	RxStatusFIFOOver	= 0x10000,
	IntrPCIErr		= 0xf00000,
	RxResetDone		= 0x1000000,
	TxResetDone		= 0x2000000,
	IntrAbnormalSummary	= 0xCD20,
};

/*
 * Default Interrupts:
 * Rx OK, Rx Packet Error, Rx Overrun,
 * Tx OK, Tx Packet Error, Tx Underrun,
 * MIB Service, Phy Interrupt, High Bits,
 * Rx Status FIFO overrun,
 * Received Target Abort, Received Master Abort,
 * Signalled System Error, Received Parity Error
 */
#define DEFAULT_INTR 0x00f1cd65
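
/*
 * Spelled out with the IntrStatus_bits names above, 0x00f1cd65 equals
 * IntrPCIErr | RxStatusFIFOOver | IntrHighBits | LinkChange | StatsMax |
 * IntrTxUnderrun | IntrTxErr | IntrTxDone | IntrRxOverrun | IntrRxErr |
 * IntrRxDone.
 */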

enum TxConfig_bits {
	TxDrthMask		= 0x3f,
	TxFlthMask		= 0x3f00,
	TxMxdmaMask		= 0x700000,
	TxMxdma_512		= 0x0,
	TxMxdma_4		= 0x100000,
	TxMxdma_8		= 0x200000,
	TxMxdma_16		= 0x300000,
	TxMxdma_32		= 0x400000,
	TxMxdma_64		= 0x500000,
	TxMxdma_128		= 0x600000,
	TxMxdma_256		= 0x700000,
	TxCollRetry		= 0x800000,
	TxAutoPad		= 0x10000000,
	TxMacLoop		= 0x20000000,
	TxHeartIgn		= 0x40000000,
	TxCarrierIgn		= 0x80000000
};

/*
 * Tx Configuration:
 * - 256 byte DMA burst length
 * - fill threshold 512 bytes (i.e. restart DMA when 512 bytes are free)
 * - 64 bytes initial drain threshold (i.e. begin actual transmission
 *   when 64 bytes are in the fifo)
 * - on tx underruns, increase the drain threshold by 64 bytes.
 * - use a drain threshold of at most 1472 bytes: the sum of the fill
 *   threshold and the drain threshold must be less than 2016 bytes.
 */
#define TX_FLTH_VAL		((512/32) << 8)
#define TX_DRTH_VAL_START	(64/32)
#define TX_DRTH_VAL_INC		2
#define TX_DRTH_VAL_LIMIT	(1472/32)
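
/*
 * A sketch (assuming the defaults above; cf. init_registers() further down
 * this file) of how the pieces combine into the TxConfig register:
 *
 *	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
 *			TX_FLTH_VAL | TX_DRTH_VAL_START;
 *	writel(np->tx_config, ioaddr + TxConfig);
 *
 * On a Tx underrun the drain threshold is raised by TX_DRTH_VAL_INC
 * (2 * 32 = 64 bytes) until it reaches TX_DRTH_VAL_LIMIT.
 */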

enum RxConfig_bits {
	RxDrthMask		= 0x3e,
	RxMxdmaMask		= 0x700000,
	RxMxdma_512		= 0x0,
	RxMxdma_4		= 0x100000,
	RxMxdma_8		= 0x200000,
	RxMxdma_16		= 0x300000,
	RxMxdma_32		= 0x400000,
	RxMxdma_64		= 0x500000,
	RxMxdma_128		= 0x600000,
	RxMxdma_256		= 0x700000,
	RxAcceptLong		= 0x8000000,
	RxAcceptTx		= 0x10000000,
	RxAcceptRunt		= 0x40000000,
	RxAcceptErr		= 0x80000000
};
#define RX_DRTH_VAL		(128/8)

enum ClkRun_bits {
	PMEEnable		= 0x100,
	PMEStatus		= 0x8000,
};

enum WolCmd_bits {
	WakePhy			= 0x1,
	WakeUnicast		= 0x2,
	WakeMulticast		= 0x4,
	WakeBroadcast		= 0x8,
	WakeArp			= 0x10,
	WakePMatch0		= 0x20,
	WakePMatch1		= 0x40,
	WakePMatch2		= 0x80,
	WakePMatch3		= 0x100,
	WakeMagic		= 0x200,
	WakeMagicSecure		= 0x400,
	SecureHack		= 0x100000,
	WokePhy			= 0x400000,
	WokeUnicast		= 0x800000,
	WokeMulticast		= 0x1000000,
	WokeBroadcast		= 0x2000000,
	WokeArp			= 0x4000000,
	WokePMatch0		= 0x8000000,
	WokePMatch1		= 0x10000000,
	WokePMatch2		= 0x20000000,
	WokePMatch3		= 0x40000000,
	WokeMagic		= 0x80000000,
	WakeOptsSummary		= 0x7ff
};

enum RxFilterAddr_bits {
	RFCRAddressMask		= 0x3ff,
	AcceptMulticast		= 0x00200000,
	AcceptMyPhys		= 0x08000000,
	AcceptAllPhys		= 0x10000000,
	AcceptAllMulticast	= 0x20000000,
	AcceptBroadcast		= 0x40000000,
	RxFilterEnable		= 0x80000000
};

enum StatsCtrl_bits {
	StatsWarn		= 0x1,
	StatsFreeze		= 0x2,
	StatsClear		= 0x4,
	StatsStrobe		= 0x8,
};

enum MIntrCtrl_bits {
	MICRIntEn		= 0x2,
};

enum PhyCtrl_bits {
	PhyAddrMask		= 0x1f,
};

#define PHY_ADDR_NONE		32
#define PHY_ADDR_INTERNAL	1

/* values we might find in the silicon revision register */
#define SRR_DP83815_C	0x0302
#define SRR_DP83815_D	0x0403
#define SRR_DP83816_A4	0x0504
#define SRR_DP83816_A5	0x0505

/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
struct netdev_desc {
	__le32 next_desc;
	__le32 cmd_status;
	__le32 addr;
	__le32 software_use;
};
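
/*
 * A simplified sketch of how descriptors are chained into a ring (cf.
 * init_ring() later in this file): each next_desc holds the bus address
 * of the following descriptor, and the modulo wraps the last entry back
 * to the first:
 *
 *	np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma +
 *		sizeof(struct netdev_desc) * ((i + 1) % RX_RING_SIZE));
 */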

/* Bits in network_desc.status */
enum desc_status_bits {
	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
	DescNoCRC=0x10000000, DescPktOK=0x08000000,
	DescSizeMask=0xfff,

	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,

	DescRxAbort=0x04000000, DescRxOver=0x02000000,
	DescRxDest=0x01800000, DescRxLong=0x00400000,
	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
	DescRxLoop=0x00020000, DesRxColl=0x00010000,
};

struct netdev_private {
	/* Descriptor rings first for alignment */
	dma_addr_t ring_dma;
	struct netdev_desc *rx_ring;
	struct netdev_desc *tx_ring;
	/* The addresses of receive-in-place skbuffs */
	struct sk_buff *rx_skbuff[RX_RING_SIZE];
	dma_addr_t rx_dma[RX_RING_SIZE];
	/* address of a sent-in-place packet/buffer, for later free() */
	struct sk_buff *tx_skbuff[TX_RING_SIZE];
	dma_addr_t tx_dma[TX_RING_SIZE];
	struct net_device *dev;
	void __iomem *ioaddr;
	struct napi_struct napi;
	/* Media monitoring timer */
	struct timer_list timer;
	/* Frequently used values: keep some adjacent for cache effect */
	struct pci_dev *pci_dev;
	struct netdev_desc *rx_head_desc;
	/* Producer/consumer ring indices */
	unsigned int cur_rx, dirty_rx;
	unsigned int cur_tx, dirty_tx;
	/* Based on MTU+slack. */
	unsigned int rx_buf_sz;
	int oom;
	/* Interrupt status */
	u32 intr_status;
	/* Do not touch the nic registers */
	int hands_off;
	/* Don't pay attention to the reported link state. */
	int ignore_phy;
	/* external phy that is used: only valid if dev->if_port != PORT_TP */
	int mii;
	int phy_addr_external;
	unsigned int full_duplex;
	/* Rx filter */
	u32 cur_rx_mode;
	u32 rx_filter[16];
	/* FIFO and PCI burst thresholds */
	u32 tx_config, rx_config;
	/* original contents of ClkRun register */
	u32 SavedClkRun;
	/* silicon revision */
	u32 srr;
	/* expected DSPCFG value */
	u16 dspcfg;
	int dspcfg_workaround;
	/* parms saved in ethtool format */
	u16	speed;		/* The forced speed, 10Mb, 100Mb, gigabit */
	u8	duplex;		/* Duplex, half or full */
	u8	autoneg;	/* Autonegotiation enabled */
	/* MII transceiver section */
	u16 advertising;
	unsigned int iosize;
	spinlock_t lock;
	u32 msg_enable;
	/* EEPROM data */
	int eeprom_size;
};

static void move_int_phy(struct net_device *dev, int addr);
static int eeprom_read(void __iomem *ioaddr, int location);
static int mdio_read(struct net_device *dev, int reg);
static void mdio_write(struct net_device *dev, int reg, u16 data);
static void init_phy_fixup(struct net_device *dev);
static int miiport_read(struct net_device *dev, int phy_id, int reg);
static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data);
static int find_mii(struct net_device *dev);
static void natsemi_reset(struct net_device *dev);
static void natsemi_reload_eeprom(struct net_device *dev);
static void natsemi_stop_rxtx(struct net_device *dev);
static int netdev_open(struct net_device *dev);
static void do_cable_magic(struct net_device *dev);
static void undo_cable_magic(struct net_device *dev);
static void check_link(struct net_device *dev);
static void netdev_timer(struct timer_list *t);
static void dump_ring(struct net_device *dev);
static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue);
static int alloc_ring(struct net_device *dev);
static void refill_rx(struct net_device *dev);
static void init_ring(struct net_device *dev);
static void drain_tx(struct net_device *dev);
static void drain_ring(struct net_device *dev);
static void free_ring(struct net_device *dev);
static void reinit_ring(struct net_device *dev);
static void init_registers(struct net_device *dev);
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t intr_handler(int irq, void *dev_instance);
static void netdev_error(struct net_device *dev, int intr_status);
static int natsemi_poll(struct napi_struct *napi, int budget);
static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do);
static void netdev_tx_done(struct net_device *dev);
static int natsemi_change_mtu(struct net_device *dev, int new_mtu);
#ifdef CONFIG_NET_POLL_CONTROLLER
static void natsemi_poll_controller(struct net_device *dev);
#endif
static void __set_rx_mode(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static void __get_stats(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_set_wol(struct net_device *dev, u32 newval);
static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur);
static int netdev_set_sopass(struct net_device *dev, u8 *newval);
static int netdev_get_sopass(struct net_device *dev, u8 *data);
static int netdev_get_ecmd(struct net_device *dev,
			   struct ethtool_link_ksettings *ecmd);
static int netdev_set_ecmd(struct net_device *dev,
			   const struct ethtool_link_ksettings *ecmd);
static void enable_wol_mode(struct net_device *dev, int enable_intr);
static int netdev_close(struct net_device *dev);
static int netdev_get_regs(struct net_device *dev, u8 *buf);
static int netdev_get_eeprom(struct net_device *dev, u8 *buf);
static const struct ethtool_ops ethtool_ops;

#define NATSEMI_ATTR(_name) \
static ssize_t natsemi_show_##_name(struct device *dev, \
		struct device_attribute *attr, char *buf); \
static ssize_t natsemi_set_##_name(struct device *dev, \
		struct device_attribute *attr, \
		const char *buf, size_t count); \
static DEVICE_ATTR(_name, 0644, natsemi_show_##_name, natsemi_set_##_name)

#define NATSEMI_CREATE_FILE(_dev, _name) \
	device_create_file(&_dev->dev, &dev_attr_##_name)
#define NATSEMI_REMOVE_FILE(_dev, _name) \
	device_remove_file(&_dev->dev, &dev_attr_##_name)

NATSEMI_ATTR(dspcfg_workaround);

static ssize_t natsemi_show_dspcfg_workaround(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct netdev_private *np = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%s\n", np->dspcfg_workaround ? "on" : "off");
}

static ssize_t natsemi_set_dspcfg_workaround(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct netdev_private *np = netdev_priv(to_net_dev(dev));
	int new_setting;
	unsigned long flags;

	/* Find out the new setting */
	if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
		new_setting = 1;
	else if (!strncmp("off", buf, count - 1) ||
		 !strncmp("0", buf, count - 1))
		new_setting = 0;
	else
		return count;

	spin_lock_irqsave(&np->lock, flags);

	np->dspcfg_workaround = new_setting;

	spin_unlock_irqrestore(&np->lock, flags);

	return count;
}

static inline void __iomem *ns_ioaddr(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);

	return np->ioaddr;
}

static inline void natsemi_irq_enable(struct net_device *dev)
{
	writel(1, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);
}

static inline void natsemi_irq_disable(struct net_device *dev)
{
	writel(0, ns_ioaddr(dev) + IntrEnable);
	readl(ns_ioaddr(dev) + IntrEnable);
}

static void move_int_phy(struct net_device *dev, int addr)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	int target = 31;

	/*
	 * The internal phy is visible on the external mii bus. Therefore we must
	 * move it away before we can send commands to an external phy.
	 * There are two addresses we must avoid:
	 * - the address on the external phy that is used for transmission.
	 * - the address that we want to access. User space can access phys
	 *   on the mii bus with SIOCGMIIREG/SIOCSMIIREG, independent from the
	 *   phy that is used for transmission.
	 */

	if (target == addr)
		target--;
	if (target == np->phy_addr_external)
		target--;
	writew(target, ioaddr + PhyCtrl);
	readw(ioaddr + PhyCtrl);
	udelay(1);
}

static void natsemi_init_media(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	u32 tmp;

	if (np->ignore_phy)
		netif_carrier_on(dev);
	else
		netif_carrier_off(dev);

	/* get the initial settings from hardware */
	tmp            = mdio_read(dev, MII_BMCR);
	np->speed      = (tmp & BMCR_SPEED100)? SPEED_100     : SPEED_10;
	np->duplex     = (tmp & BMCR_FULLDPLX)? DUPLEX_FULL   : DUPLEX_HALF;
	np->autoneg    = (tmp & BMCR_ANENABLE)? AUTONEG_ENABLE: AUTONEG_DISABLE;
	np->advertising= mdio_read(dev, MII_ADVERTISE);

	if ((np->advertising & ADVERTISE_ALL) != ADVERTISE_ALL &&
	    netif_msg_probe(np)) {
		printk(KERN_INFO "natsemi %s: Transceiver default autonegotiation %s "
			"10%s %s duplex.\n",
			pci_name(np->pci_dev),
			(mdio_read(dev, MII_BMCR) & BMCR_ANENABLE)?
			  "enabled, advertise" : "disabled, force",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_100HALF))?
			    "0" : "",
			(np->advertising &
			  (ADVERTISE_100FULL|ADVERTISE_10FULL))?
			    "full" : "half");
	}
	if (netif_msg_probe(np))
		printk(KERN_INFO
			"natsemi %s: Transceiver status %#04x advertising %#04x.\n",
			pci_name(np->pci_dev), mdio_read(dev, MII_BMSR),
			np->advertising);
}

static const struct net_device_ops natsemi_netdev_ops = {
	.ndo_open		= netdev_open,
	.ndo_stop		= netdev_close,
	.ndo_start_xmit		= start_tx,
	.ndo_get_stats		= get_stats,
	.ndo_set_rx_mode	= set_rx_mode,
	.ndo_change_mtu		= natsemi_change_mtu,
	.ndo_do_ioctl		= netdev_ioctl,
	.ndo_tx_timeout		= ns_tx_timeout,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= natsemi_poll_controller,
#endif
};

static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	int i, option, irq, chip_idx = ent->driver_data;
	static int find_cnt = -1;
	resource_size_t iostart;
	unsigned long iosize;
	void __iomem *ioaddr;
	const int pcibar = 1; /* PCI base address register */
	int prev_eedata;
	u32 tmp;

/* when built into the kernel, we only print version if device is found */
#ifndef MODULE
	static int printed_version;
	if (!printed_version++)
		printk(version);
#endif

	i = pcim_enable_device(pdev);
	if (i) return i;

	/* natsemi has a non-standard PM control register
	 * in PCI config space.  Some boards apparently need
	 * to be brought to D0 in this manner.
	 */
	pci_read_config_dword(pdev, PCIPM, &tmp);
	if (tmp & PCI_PM_CTRL_STATE_MASK) {
		/* D0 state, disable PME assertion */
		u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
		pci_write_config_dword(pdev, PCIPM, newtmp);
	}

	find_cnt++;
	iostart = pci_resource_start(pdev, pcibar);
	iosize = pci_resource_len(pdev, pcibar);
	irq = pdev->irq;

	pci_set_master(pdev);

	dev = alloc_etherdev(sizeof (struct netdev_private));
	if (!dev)
		return -ENOMEM;
	SET_NETDEV_DEV(dev, &pdev->dev);

	i = pci_request_regions(pdev, DRV_NAME);
	if (i)
		goto err_pci_request_regions;

	ioaddr = ioremap(iostart, iosize);
	if (!ioaddr) {
		i = -ENOMEM;
		goto err_pci_request_regions;
	}

	/* Work around the dropped serial bit. */
	prev_eedata = eeprom_read(ioaddr, 6);
	for (i = 0; i < 3; i++) {
		int eedata = eeprom_read(ioaddr, i + 7);
		dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
		dev->dev_addr[i*2+1] = eedata >> 7;
		prev_eedata = eedata;
	}

	np = netdev_priv(dev);
	np->ioaddr = ioaddr;

	netif_napi_add(dev, &np->napi, natsemi_poll, 64);
	np->dev = dev;

	np->pci_dev = pdev;
	pci_set_drvdata(pdev, dev);
	np->iosize = iosize;
	spin_lock_init(&np->lock);
	np->msg_enable = (debug >= 0) ? (1<<debug)-1 : NATSEMI_DEF_MSG;
	np->hands_off = 0;
	np->intr_status = 0;
	np->eeprom_size = natsemi_pci_info[chip_idx].eeprom_size;
	if (natsemi_pci_info[chip_idx].flags & NATSEMI_FLAG_IGNORE_PHY)
		np->ignore_phy = 1;
	else
		np->ignore_phy = 0;
	np->dspcfg_workaround = dspcfg_workaround;

	/* Initial port:
	 * - If configured to ignore the PHY set up for external.
	 * - If the nic was configured to use an external phy and if find_mii
	 *   finds a phy: use external port, first phy that replies.
	 * - Otherwise: internal port.
	 * Note that the phy address for the internal phy doesn't matter:
	 * The address would be used to access a phy over the mii bus, but
	 * the internal phy is accessed through mapped registers.
	 */
	if (np->ignore_phy || readl(ioaddr + ChipConfig) & CfgExtPhy)
		dev->if_port = PORT_MII;
	else
		dev->if_port = PORT_TP;
	/* Reset the chip to erase previous misconfiguration. */
	natsemi_reload_eeprom(dev);
	natsemi_reset(dev);

	if (dev->if_port != PORT_TP) {
		np->phy_addr_external = find_mii(dev);
		/* If we're ignoring the PHY it doesn't matter if we can't
		 * find one. */
		if (!np->ignore_phy && np->phy_addr_external == PHY_ADDR_NONE) {
			dev->if_port = PORT_TP;
			np->phy_addr_external = PHY_ADDR_INTERNAL;
		}
	} else {
		np->phy_addr_external = PHY_ADDR_INTERNAL;
	}

	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
	/* The lower four bits are the media type. */
	if (option) {
		if (option & 0x200)
			np->full_duplex = 1;
		if (option & 15)
			printk(KERN_INFO
				"natsemi %s: ignoring user supplied media type %d",
				pci_name(np->pci_dev), option & 15);
	}
	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt])
		np->full_duplex = 1;

	dev->netdev_ops = &natsemi_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &ethtool_ops;

	/* MTU range: 64 - 2024 */
	dev->min_mtu = ETH_ZLEN + ETH_FCS_LEN;
	dev->max_mtu = NATSEMI_RX_LIMIT - NATSEMI_HEADERS;

	if (mtu)
		dev->mtu = mtu;

	natsemi_init_media(dev);

	/* save the silicon revision for later querying */
	np->srr = readl(ioaddr + SiliconRev);
	if (netif_msg_hw(np))
		printk(KERN_INFO "natsemi %s: silicon revision %#04x.\n",
				pci_name(np->pci_dev), np->srr);

	i = register_netdev(dev);
	if (i)
		goto err_register_netdev;
	i = NATSEMI_CREATE_FILE(pdev, dspcfg_workaround);
	if (i)
		goto err_create_file;

	if (netif_msg_drv(np)) {
		printk(KERN_INFO "natsemi %s: %s at %#08llx "
		       "(%s), %pM, IRQ %d",
		       dev->name, natsemi_pci_info[chip_idx].name,
		       (unsigned long long)iostart, pci_name(np->pci_dev),
		       dev->dev_addr, irq);
		if (dev->if_port == PORT_TP)
			printk(", port TP.\n");
		else if (np->ignore_phy)
			printk(", port MII, ignoring PHY\n");
		else
			printk(", port MII, phy ad %d.\n", np->phy_addr_external);
	}
	return 0;

 err_create_file:
	unregister_netdev(dev);

 err_register_netdev:
	iounmap(ioaddr);

 err_pci_request_regions:
	free_netdev(dev);
	return i;
}


/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
   The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33 MHz PCI, but future 66 MHz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated.
*/
#define eeprom_delay(ee_addr)	readl(ee_addr)

#define EE_Write0 (EE_ChipSelect)
#define EE_Write1 (EE_ChipSelect | EE_DataIn)

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};

eeprom_read(void __iomem * addr,int location)1003*4882a593Smuzhiyun static int eeprom_read(void __iomem *addr, int location)
1004*4882a593Smuzhiyun {
1005*4882a593Smuzhiyun 	int i;
1006*4882a593Smuzhiyun 	int retval = 0;
1007*4882a593Smuzhiyun 	void __iomem *ee_addr = addr + EECtrl;
1008*4882a593Smuzhiyun 	int read_cmd = location | EE_ReadCmd;
1009*4882a593Smuzhiyun 
1010*4882a593Smuzhiyun 	writel(EE_Write0, ee_addr);
1011*4882a593Smuzhiyun 
1012*4882a593Smuzhiyun 	/* Shift the read command bits out. */
1013*4882a593Smuzhiyun 	for (i = 10; i >= 0; i--) {
1014*4882a593Smuzhiyun 		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
1015*4882a593Smuzhiyun 		writel(dataval, ee_addr);
1016*4882a593Smuzhiyun 		eeprom_delay(ee_addr);
1017*4882a593Smuzhiyun 		writel(dataval | EE_ShiftClk, ee_addr);
1018*4882a593Smuzhiyun 		eeprom_delay(ee_addr);
1019*4882a593Smuzhiyun 	}
1020*4882a593Smuzhiyun 	writel(EE_ChipSelect, ee_addr);
1021*4882a593Smuzhiyun 	eeprom_delay(ee_addr);
1022*4882a593Smuzhiyun 
1023*4882a593Smuzhiyun 	for (i = 0; i < 16; i++) {
1024*4882a593Smuzhiyun 		writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
1025*4882a593Smuzhiyun 		eeprom_delay(ee_addr);
1026*4882a593Smuzhiyun 		retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
1027*4882a593Smuzhiyun 		writel(EE_ChipSelect, ee_addr);
1028*4882a593Smuzhiyun 		eeprom_delay(ee_addr);
1029*4882a593Smuzhiyun 	}
1030*4882a593Smuzhiyun 
1031*4882a593Smuzhiyun 	/* Terminate the EEPROM access. */
1032*4882a593Smuzhiyun 	writel(EE_Write0, ee_addr);
1033*4882a593Smuzhiyun 	writel(0, ee_addr);
1034*4882a593Smuzhiyun 	return retval;
1035*4882a593Smuzhiyun }
1036*4882a593Smuzhiyun 
1037*4882a593Smuzhiyun /* MII transceiver control section.
1038*4882a593Smuzhiyun  * The 83815 series has an internal transceiver, and we present the
1039*4882a593Smuzhiyun  * internal management registers as if they were MII connected.
1040*4882a593Smuzhiyun  * External Phy registers are referenced through the MII interface.
1041*4882a593Smuzhiyun  */
1042*4882a593Smuzhiyun 
1043*4882a593Smuzhiyun /* clock transitions >= 20ns (25MHz)
1044*4882a593Smuzhiyun  * One readl should be good to PCI @ 100MHz
1045*4882a593Smuzhiyun  */
1046*4882a593Smuzhiyun #define mii_delay(ioaddr)  readl(ioaddr + EECtrl)
1047*4882a593Smuzhiyun 
mii_getbit(struct net_device * dev)1048*4882a593Smuzhiyun static int mii_getbit (struct net_device *dev)
1049*4882a593Smuzhiyun {
1050*4882a593Smuzhiyun 	int data;
1051*4882a593Smuzhiyun 	void __iomem *ioaddr = ns_ioaddr(dev);
1052*4882a593Smuzhiyun 
1053*4882a593Smuzhiyun 	writel(MII_ShiftClk, ioaddr + EECtrl);
1054*4882a593Smuzhiyun 	data = readl(ioaddr + EECtrl);
1055*4882a593Smuzhiyun 	writel(0, ioaddr + EECtrl);
1056*4882a593Smuzhiyun 	mii_delay(ioaddr);
1057*4882a593Smuzhiyun 	return (data & MII_Data)? 1 : 0;
1058*4882a593Smuzhiyun }
1059*4882a593Smuzhiyun 
mii_send_bits(struct net_device * dev,u32 data,int len)1060*4882a593Smuzhiyun static void mii_send_bits (struct net_device *dev, u32 data, int len)
1061*4882a593Smuzhiyun {
1062*4882a593Smuzhiyun 	u32 i;
1063*4882a593Smuzhiyun 	void __iomem *ioaddr = ns_ioaddr(dev);
1064*4882a593Smuzhiyun 
1065*4882a593Smuzhiyun 	for (i = (1 << (len-1)); i; i >>= 1)
1066*4882a593Smuzhiyun 	{
1067*4882a593Smuzhiyun 		u32 mdio_val = MII_Write | ((data & i)? MII_Data : 0);
1068*4882a593Smuzhiyun 		writel(mdio_val, ioaddr + EECtrl);
1069*4882a593Smuzhiyun 		mii_delay(ioaddr);
1070*4882a593Smuzhiyun 		writel(mdio_val | MII_ShiftClk, ioaddr + EECtrl);
1071*4882a593Smuzhiyun 		mii_delay(ioaddr);
1072*4882a593Smuzhiyun 	}
1073*4882a593Smuzhiyun 	writel(0, ioaddr + EECtrl);
1074*4882a593Smuzhiyun 	mii_delay(ioaddr);
1075*4882a593Smuzhiyun }
1076*4882a593Smuzhiyun 
miiport_read(struct net_device * dev,int phy_id,int reg)1077*4882a593Smuzhiyun static int miiport_read(struct net_device *dev, int phy_id, int reg)
1078*4882a593Smuzhiyun {
1079*4882a593Smuzhiyun 	u32 cmd;
1080*4882a593Smuzhiyun 	int i;
1081*4882a593Smuzhiyun 	u32 retval = 0;
1082*4882a593Smuzhiyun 
1083*4882a593Smuzhiyun 	/* Ensure sync */
1084*4882a593Smuzhiyun 	mii_send_bits (dev, 0xffffffff, 32);
1085*4882a593Smuzhiyun 	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1086*4882a593Smuzhiyun 	/* ST,OP = 0110'b for read operation */
1087*4882a593Smuzhiyun 	cmd = (0x06 << 10) | (phy_id << 5) | reg;
1088*4882a593Smuzhiyun 	mii_send_bits (dev, cmd, 14);
1089*4882a593Smuzhiyun 	/* Turnaround */
1090*4882a593Smuzhiyun 	if (mii_getbit (dev))
1091*4882a593Smuzhiyun 		return 0;
1092*4882a593Smuzhiyun 	/* Read data */
1093*4882a593Smuzhiyun 	for (i = 0; i < 16; i++) {
1094*4882a593Smuzhiyun 		retval <<= 1;
1095*4882a593Smuzhiyun 		retval |= mii_getbit (dev);
1096*4882a593Smuzhiyun 	}
1097*4882a593Smuzhiyun 	/* End cycle */
1098*4882a593Smuzhiyun 	mii_getbit (dev);
1099*4882a593Smuzhiyun 	return retval;
1100*4882a593Smuzhiyun }
1101*4882a593Smuzhiyun 
1102*4882a593Smuzhiyun static void miiport_write(struct net_device *dev, int phy_id, int reg, u16 data)
1103*4882a593Smuzhiyun {
1104*4882a593Smuzhiyun 	u32 cmd;
1105*4882a593Smuzhiyun 
1106*4882a593Smuzhiyun 	/* Ensure sync */
1107*4882a593Smuzhiyun 	mii_send_bits (dev, 0xffffffff, 32);
1108*4882a593Smuzhiyun 	/* ST(2), OP(2), ADDR(5), REG#(5), TA(2), Data(16) total 32 bits */
1109*4882a593Smuzhiyun 	/* ST,OP,AAAAA,RRRRR,TA = 0101xxxxxxxxxx10'b = 0x5002 for write */
1110*4882a593Smuzhiyun 	cmd = (0x5002 << 16) | (phy_id << 23) | (reg << 18) | data;
1111*4882a593Smuzhiyun 	mii_send_bits (dev, cmd, 32);
1112*4882a593Smuzhiyun 	/* End cycle */
1113*4882a593Smuzhiyun 	mii_getbit (dev);
1114*4882a593Smuzhiyun }
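
/*
 * Editorial worked example (not in the original source): for a read of
 * MII_BMSR (reg 1) on PHY address 1, the 14-bit frame built above is
 *
 *	cmd = (0x06 << 10) | (1 << 5) | 1 = 0x1821
 *
 * i.e. ST=01, OP=10 (read), ADDR=00001, REG=00001; the turnaround and
 * the sixteen data bits are then clocked in by mii_getbit().  For a
 * write of 0x1200 to MII_BMCR (reg 0) on the same PHY, the full 32-bit
 * frame is
 *
 *	cmd = (0x5002 << 16) | (1 << 23) | (0 << 18) | 0x1200 = 0x50821200
 */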
1115*4882a593Smuzhiyun 
1116*4882a593Smuzhiyun static int mdio_read(struct net_device *dev, int reg)
1117*4882a593Smuzhiyun {
1118*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1119*4882a593Smuzhiyun 	void __iomem *ioaddr = ns_ioaddr(dev);
1120*4882a593Smuzhiyun 
1121*4882a593Smuzhiyun 	/* The 83815 series has two ports:
1122*4882a593Smuzhiyun 	 * - an internal transceiver
1123*4882a593Smuzhiyun 	 * - an external mii bus
1124*4882a593Smuzhiyun 	 */
1125*4882a593Smuzhiyun 	if (dev->if_port == PORT_TP)
1126*4882a593Smuzhiyun 		return readw(ioaddr+BasicControl+(reg<<2));
1127*4882a593Smuzhiyun 	else
1128*4882a593Smuzhiyun 		return miiport_read(dev, np->phy_addr_external, reg);
1129*4882a593Smuzhiyun }
1130*4882a593Smuzhiyun 
1131*4882a593Smuzhiyun static void mdio_write(struct net_device *dev, int reg, u16 data)
1132*4882a593Smuzhiyun {
1133*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1134*4882a593Smuzhiyun 	void __iomem *ioaddr = ns_ioaddr(dev);
1135*4882a593Smuzhiyun 
1136*4882a593Smuzhiyun 	/* The 83815 series has an internal transceiver; handle separately */
1137*4882a593Smuzhiyun 	if (dev->if_port == PORT_TP)
1138*4882a593Smuzhiyun 		writew(data, ioaddr+BasicControl+(reg<<2));
1139*4882a593Smuzhiyun 	else
1140*4882a593Smuzhiyun 		miiport_write(dev, np->phy_addr_external, reg, data);
1141*4882a593Smuzhiyun }
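
/*
 * Editorial sketch (not part of the original driver): because
 * mdio_read()/mdio_write() hide the internal-vs-external port split, a
 * port-agnostic autonegotiation restart is a plain read-modify-write of
 * BMCR.  The helper name below is hypothetical.
 */
#if 0
static void natsemi_restart_aneg(struct net_device *dev)
{
	u16 bmcr = mdio_read(dev, MII_BMCR);

	mdio_write(dev, MII_BMCR, bmcr | BMCR_ANENABLE | BMCR_ANRESTART);
}
#endif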
1142*4882a593Smuzhiyun 
1143*4882a593Smuzhiyun static void init_phy_fixup(struct net_device *dev)
1144*4882a593Smuzhiyun {
1145*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1146*4882a593Smuzhiyun 	void __iomem *ioaddr = ns_ioaddr(dev);
1147*4882a593Smuzhiyun 	int i;
1148*4882a593Smuzhiyun 	u32 cfg;
1149*4882a593Smuzhiyun 	u16 tmp;
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 	/* restore stuff lost when power was out */
1152*4882a593Smuzhiyun 	tmp = mdio_read(dev, MII_BMCR);
1153*4882a593Smuzhiyun 	if (np->autoneg == AUTONEG_ENABLE) {
1154*4882a593Smuzhiyun 		/* renegotiate if something changed */
1155*4882a593Smuzhiyun 		if ((tmp & BMCR_ANENABLE) == 0 ||
1156*4882a593Smuzhiyun 		    np->advertising != mdio_read(dev, MII_ADVERTISE))
1157*4882a593Smuzhiyun 		{
1158*4882a593Smuzhiyun 			/* turn on autonegotiation and force a renegotiation */
1159*4882a593Smuzhiyun 			tmp |= (BMCR_ANENABLE | BMCR_ANRESTART);
1160*4882a593Smuzhiyun 			mdio_write(dev, MII_ADVERTISE, np->advertising);
1161*4882a593Smuzhiyun 		}
1162*4882a593Smuzhiyun 	} else {
1163*4882a593Smuzhiyun 		/* turn off autonegotiation, set speed and duplex */
1164*4882a593Smuzhiyun 		tmp &= ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX);
1165*4882a593Smuzhiyun 		if (np->speed == SPEED_100)
1166*4882a593Smuzhiyun 			tmp |= BMCR_SPEED100;
1167*4882a593Smuzhiyun 		if (np->duplex == DUPLEX_FULL)
1168*4882a593Smuzhiyun 			tmp |= BMCR_FULLDPLX;
1169*4882a593Smuzhiyun 		/*
1170*4882a593Smuzhiyun 		 * Note: there is no good way to inform the link partner
1171*4882a593Smuzhiyun 		 * that our capabilities changed. The user has to unplug
1172*4882a593Smuzhiyun 		 * and replug the network cable after some changes, e.g.
1173*4882a593Smuzhiyun 		 * after switching from 10HD, autoneg off to 100 HD,
1174*4882a593Smuzhiyun 		 * autoneg off.
1175*4882a593Smuzhiyun 		 */
1176*4882a593Smuzhiyun 	}
1177*4882a593Smuzhiyun 	mdio_write(dev, MII_BMCR, tmp);
1178*4882a593Smuzhiyun 	readl(ioaddr + ChipConfig);
1179*4882a593Smuzhiyun 	udelay(1);
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun 	/* find out what phy this is */
1182*4882a593Smuzhiyun 	np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
1183*4882a593Smuzhiyun 				+ mdio_read(dev, MII_PHYSID2);
1184*4882a593Smuzhiyun 
1185*4882a593Smuzhiyun 	/* handle external phys here */
1186*4882a593Smuzhiyun 	switch (np->mii) {
1187*4882a593Smuzhiyun 	case PHYID_AM79C874:
1188*4882a593Smuzhiyun 		/* phy specific configuration for fibre/tp operation */
1189*4882a593Smuzhiyun 		tmp = mdio_read(dev, MII_MCTRL);
1190*4882a593Smuzhiyun 		tmp &= ~(MII_FX_SEL | MII_EN_SCRM);
1191*4882a593Smuzhiyun 		if (dev->if_port == PORT_FIBRE)
1192*4882a593Smuzhiyun 			tmp |= MII_FX_SEL;
1193*4882a593Smuzhiyun 		else
1194*4882a593Smuzhiyun 			tmp |= MII_EN_SCRM;
1195*4882a593Smuzhiyun 		mdio_write(dev, MII_MCTRL, tmp);
1196*4882a593Smuzhiyun 		break;
1197*4882a593Smuzhiyun 	default:
1198*4882a593Smuzhiyun 		break;
1199*4882a593Smuzhiyun 	}
1200*4882a593Smuzhiyun 	cfg = readl(ioaddr + ChipConfig);
1201*4882a593Smuzhiyun 	if (cfg & CfgExtPhy)
1202*4882a593Smuzhiyun 		return;
1203*4882a593Smuzhiyun 
1204*4882a593Smuzhiyun 	/* On page 78 of the spec, they recommend some settings for "optimum
1205*4882a593Smuzhiyun 	   performance" to be done in sequence.  These settings optimize some
1206*4882a593Smuzhiyun 	   of the 100Mbit autodetection circuitry.  They say we only want to
1207*4882a593Smuzhiyun 	   do this for rev C of the chip, but engineers at NSC (Bradley
1208*4882a593Smuzhiyun 	   Kennedy) recommend always setting them.  If you don't, you get
1209*4882a593Smuzhiyun 	   errors on some autonegotiations that make the device unusable.
1210*4882a593Smuzhiyun 
1211*4882a593Smuzhiyun 	   It seems that the DSP needs a few usec to reinitialize after
1212*4882a593Smuzhiyun 	   the start of the phy. Just retry writing these values until they
1213*4882a593Smuzhiyun 	   stick.
1214*4882a593Smuzhiyun 	*/
1215*4882a593Smuzhiyun 	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1216*4882a593Smuzhiyun 
1217*4882a593Smuzhiyun 		int dspcfg;
1218*4882a593Smuzhiyun 		writew(1, ioaddr + PGSEL);
1219*4882a593Smuzhiyun 		writew(PMDCSR_VAL, ioaddr + PMDCSR);
1220*4882a593Smuzhiyun 		writew(TSTDAT_VAL, ioaddr + TSTDAT);
1221*4882a593Smuzhiyun 		np->dspcfg = (np->srr <= SRR_DP83815_C)?
1222*4882a593Smuzhiyun 			DSPCFG_VAL : (DSPCFG_COEF | readw(ioaddr + DSPCFG));
1223*4882a593Smuzhiyun 		writew(np->dspcfg, ioaddr + DSPCFG);
1224*4882a593Smuzhiyun 		writew(SDCFG_VAL, ioaddr + SDCFG);
1225*4882a593Smuzhiyun 		writew(0, ioaddr + PGSEL);
1226*4882a593Smuzhiyun 		readl(ioaddr + ChipConfig);
1227*4882a593Smuzhiyun 		udelay(10);
1228*4882a593Smuzhiyun 
1229*4882a593Smuzhiyun 		writew(1, ioaddr + PGSEL);
1230*4882a593Smuzhiyun 		dspcfg = readw(ioaddr + DSPCFG);
1231*4882a593Smuzhiyun 		writew(0, ioaddr + PGSEL);
1232*4882a593Smuzhiyun 		if (np->dspcfg == dspcfg)
1233*4882a593Smuzhiyun 			break;
1234*4882a593Smuzhiyun 	}
1235*4882a593Smuzhiyun 
1236*4882a593Smuzhiyun 	if (netif_msg_link(np)) {
1237*4882a593Smuzhiyun 		if (i==NATSEMI_HW_TIMEOUT) {
1238*4882a593Smuzhiyun 			printk(KERN_INFO
1239*4882a593Smuzhiyun 				"%s: DSPCFG mismatch after retrying for %d usec.\n",
1240*4882a593Smuzhiyun 				dev->name, i*10);
1241*4882a593Smuzhiyun 		} else {
1242*4882a593Smuzhiyun 			printk(KERN_INFO
1243*4882a593Smuzhiyun 				"%s: DSPCFG accepted after %d usec.\n",
1244*4882a593Smuzhiyun 				dev->name, i*10);
1245*4882a593Smuzhiyun 		}
1246*4882a593Smuzhiyun 	}
1247*4882a593Smuzhiyun 	/*
1248*4882a593Smuzhiyun 	 * Enable PHY Specific event based interrupts.  Link state change
1249*4882a593Smuzhiyun 	 * and Auto-Negotiation Completion are among the affected.
1250*4882a593Smuzhiyun 	 * Read the intr status to clear it (needed for wake events).
1251*4882a593Smuzhiyun 	 */
1252*4882a593Smuzhiyun 	readw(ioaddr + MIntrStatus);
1253*4882a593Smuzhiyun 	writew(MICRIntEn, ioaddr + MIntrCtrl);
1254*4882a593Smuzhiyun }
1255*4882a593Smuzhiyun 
1256*4882a593Smuzhiyun static int switch_port_external(struct net_device *dev)
1257*4882a593Smuzhiyun {
1258*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1259*4882a593Smuzhiyun 	void __iomem *ioaddr = ns_ioaddr(dev);
1260*4882a593Smuzhiyun 	u32 cfg;
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun 	cfg = readl(ioaddr + ChipConfig);
1263*4882a593Smuzhiyun 	if (cfg & CfgExtPhy)
1264*4882a593Smuzhiyun 		return 0;
1265*4882a593Smuzhiyun 
1266*4882a593Smuzhiyun 	if (netif_msg_link(np)) {
1267*4882a593Smuzhiyun 		printk(KERN_INFO "%s: switching to external transceiver.\n",
1268*4882a593Smuzhiyun 				dev->name);
1269*4882a593Smuzhiyun 	}
1270*4882a593Smuzhiyun 
1271*4882a593Smuzhiyun 	/* 1) switch back to external phy */
1272*4882a593Smuzhiyun 	writel(cfg | (CfgExtPhy | CfgPhyDis), ioaddr + ChipConfig);
1273*4882a593Smuzhiyun 	readl(ioaddr + ChipConfig);
1274*4882a593Smuzhiyun 	udelay(1);
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 	/* 2) reset the external phy: */
1277*4882a593Smuzhiyun 	/* resetting the external PHY has been known to cause a hub supplying
1278*4882a593Smuzhiyun 	 * power over Ethernet to kill the power.  We don't want to kill
1279*4882a593Smuzhiyun 	 * power to this computer, so we avoid resetting the phy.
1280*4882a593Smuzhiyun 	 */
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun 	/* 3) reinit the phy fixup, it got lost during power down. */
1283*4882a593Smuzhiyun 	move_int_phy(dev, np->phy_addr_external);
1284*4882a593Smuzhiyun 	init_phy_fixup(dev);
1285*4882a593Smuzhiyun 
1286*4882a593Smuzhiyun 	return 1;
1287*4882a593Smuzhiyun }
1288*4882a593Smuzhiyun 
1289*4882a593Smuzhiyun static int switch_port_internal(struct net_device *dev)
1290*4882a593Smuzhiyun {
1291*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1292*4882a593Smuzhiyun 	void __iomem *ioaddr = ns_ioaddr(dev);
1293*4882a593Smuzhiyun 	int i;
1294*4882a593Smuzhiyun 	u32 cfg;
1295*4882a593Smuzhiyun 	u16 bmcr;
1296*4882a593Smuzhiyun 
1297*4882a593Smuzhiyun 	cfg = readl(ioaddr + ChipConfig);
1298*4882a593Smuzhiyun 	if (!(cfg & CfgExtPhy))
1299*4882a593Smuzhiyun 		return 0;
1300*4882a593Smuzhiyun 
1301*4882a593Smuzhiyun 	if (netif_msg_link(np)) {
1302*4882a593Smuzhiyun 		printk(KERN_INFO "%s: switching to internal transceiver.\n",
1303*4882a593Smuzhiyun 				dev->name);
1304*4882a593Smuzhiyun 	}
1305*4882a593Smuzhiyun 	/* 1) switch back to internal phy: */
1306*4882a593Smuzhiyun 	cfg = cfg & ~(CfgExtPhy | CfgPhyDis);
1307*4882a593Smuzhiyun 	writel(cfg, ioaddr + ChipConfig);
1308*4882a593Smuzhiyun 	readl(ioaddr + ChipConfig);
1309*4882a593Smuzhiyun 	udelay(1);
1310*4882a593Smuzhiyun 
1311*4882a593Smuzhiyun 	/* 2) reset the internal phy: */
1312*4882a593Smuzhiyun 	bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
1313*4882a593Smuzhiyun 	writel(bmcr | BMCR_RESET, ioaddr+BasicControl+(MII_BMCR<<2));
1314*4882a593Smuzhiyun 	readl(ioaddr + ChipConfig);
1315*4882a593Smuzhiyun 	udelay(10);
1316*4882a593Smuzhiyun 	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1317*4882a593Smuzhiyun 		bmcr = readw(ioaddr+BasicControl+(MII_BMCR<<2));
1318*4882a593Smuzhiyun 		if (!(bmcr & BMCR_RESET))
1319*4882a593Smuzhiyun 			break;
1320*4882a593Smuzhiyun 		udelay(10);
1321*4882a593Smuzhiyun 	}
1322*4882a593Smuzhiyun 	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
1323*4882a593Smuzhiyun 		printk(KERN_INFO
1324*4882a593Smuzhiyun 			"%s: phy reset did not complete in %d usec.\n",
1325*4882a593Smuzhiyun 			dev->name, i*10);
1326*4882a593Smuzhiyun 	}
1327*4882a593Smuzhiyun 	/* 3) reinit the phy fixup, it got lost during power down. */
1328*4882a593Smuzhiyun 	init_phy_fixup(dev);
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	return 1;
1331*4882a593Smuzhiyun }
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun /* Scan for a PHY on the external mii bus.
1334*4882a593Smuzhiyun  * There are two tricky points:
1335*4882a593Smuzhiyun  * - Do not scan while the internal phy is enabled. The internal phy will
1336*4882a593Smuzhiyun  *   crash: e.g. reads from the DSPCFG register will return odd values and
1337*4882a593Smuzhiyun  *   the nasty random phy reset code will reset the nic every few seconds.
1338*4882a593Smuzhiyun  * - The internal phy must be moved around, an external phy could
1339*4882a593Smuzhiyun  *   have the same address as the internal phy.
1340*4882a593Smuzhiyun  */
1341*4882a593Smuzhiyun static int find_mii(struct net_device *dev)
1342*4882a593Smuzhiyun {
1343*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1344*4882a593Smuzhiyun 	int tmp;
1345*4882a593Smuzhiyun 	int i;
1346*4882a593Smuzhiyun 	int did_switch;
1347*4882a593Smuzhiyun 
1348*4882a593Smuzhiyun 	/* Switch to external phy */
1349*4882a593Smuzhiyun 	did_switch = switch_port_external(dev);
1350*4882a593Smuzhiyun 
1351*4882a593Smuzhiyun 	/* Scan the possible phy addresses:
1352*4882a593Smuzhiyun 	 *
1353*4882a593Smuzhiyun 	 * PHY address 0 means that the phy is in isolate mode. Not yet
1354*4882a593Smuzhiyun 	 * supported due to lack of test hardware. User space should
1355*4882a593Smuzhiyun 	 * handle it through ethtool.
1356*4882a593Smuzhiyun 	 */
1357*4882a593Smuzhiyun 	for (i = 1; i <= 31; i++) {
1358*4882a593Smuzhiyun 		move_int_phy(dev, i);
1359*4882a593Smuzhiyun 		tmp = miiport_read(dev, i, MII_BMSR);
1360*4882a593Smuzhiyun 		if (tmp != 0xffff && tmp != 0x0000) {
1361*4882a593Smuzhiyun 			/* found something! */
1362*4882a593Smuzhiyun 			np->mii = (mdio_read(dev, MII_PHYSID1) << 16)
1363*4882a593Smuzhiyun 					+ mdio_read(dev, MII_PHYSID2);
1364*4882a593Smuzhiyun 			if (netif_msg_probe(np)) {
1365*4882a593Smuzhiyun 				printk(KERN_INFO "natsemi %s: found external phy %08x at address %d.\n",
1366*4882a593Smuzhiyun 						pci_name(np->pci_dev), np->mii, i);
1367*4882a593Smuzhiyun 			}
1368*4882a593Smuzhiyun 			break;
1369*4882a593Smuzhiyun 		}
1370*4882a593Smuzhiyun 	}
1371*4882a593Smuzhiyun 	/* And switch back to internal phy: */
1372*4882a593Smuzhiyun 	if (did_switch)
1373*4882a593Smuzhiyun 		switch_port_internal(dev);
1374*4882a593Smuzhiyun 	return i;
1375*4882a593Smuzhiyun }
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun /* CFG bits [13:16] [18:23] */
1378*4882a593Smuzhiyun #define CFG_RESET_SAVE 0xfde000
1379*4882a593Smuzhiyun /* WCSR bits [0:4] [9:10] */
1380*4882a593Smuzhiyun #define WCSR_RESET_SAVE 0x61f
1381*4882a593Smuzhiyun /* RFCR bits [20] [22] [27:31] */
1382*4882a593Smuzhiyun #define RFCR_RESET_SAVE 0xf8500000
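
/*
 * Editorial note (not in the original source): the bit-range comments
 * above can be checked by expanding the masks:
 *	CFG  bits [13:16] = 0x0001e000, bits [18:23] = 0x00fc0000,
 *	     ORed together = 0x00fde000 == CFG_RESET_SAVE
 *	WCSR bits [0:4] = 0x01f, bits [9:10] = 0x600 -> 0x61f
 *	RFCR bits [20] [22] = 0x00500000, bits [27:31] = 0xf8000000
 *	     -> 0xf8500000
 */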
1383*4882a593Smuzhiyun 
1384*4882a593Smuzhiyun static void natsemi_reset(struct net_device *dev)
1385*4882a593Smuzhiyun {
1386*4882a593Smuzhiyun 	int i;
1387*4882a593Smuzhiyun 	u32 cfg;
1388*4882a593Smuzhiyun 	u32 wcsr;
1389*4882a593Smuzhiyun 	u32 rfcr;
1390*4882a593Smuzhiyun 	u16 pmatch[3];
1391*4882a593Smuzhiyun 	u16 sopass[3];
1392*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1393*4882a593Smuzhiyun 	void __iomem *ioaddr = ns_ioaddr(dev);
1394*4882a593Smuzhiyun 
1395*4882a593Smuzhiyun 	/*
1396*4882a593Smuzhiyun 	 * Resetting the chip causes some registers to be lost.
1397*4882a593Smuzhiyun 	 * Natsemi suggests NOT reloading the EEPROM while live, so instead
1398*4882a593Smuzhiyun 	 * we save the state that would have been loaded from EEPROM
1399*4882a593Smuzhiyun 	 * on a normal power-up (see the spec EEPROM map).  This assumes
1400*4882a593Smuzhiyun 	 * whoever calls this will follow up with init_registers() eventually.
1401*4882a593Smuzhiyun 	 */
1402*4882a593Smuzhiyun 
1403*4882a593Smuzhiyun 	/* CFG */
1404*4882a593Smuzhiyun 	cfg = readl(ioaddr + ChipConfig) & CFG_RESET_SAVE;
1405*4882a593Smuzhiyun 	/* WCSR */
1406*4882a593Smuzhiyun 	wcsr = readl(ioaddr + WOLCmd) & WCSR_RESET_SAVE;
1407*4882a593Smuzhiyun 	/* RFCR */
1408*4882a593Smuzhiyun 	rfcr = readl(ioaddr + RxFilterAddr) & RFCR_RESET_SAVE;
1409*4882a593Smuzhiyun 	/* PMATCH */
1410*4882a593Smuzhiyun 	for (i = 0; i < 3; i++) {
1411*4882a593Smuzhiyun 		writel(i*2, ioaddr + RxFilterAddr);
1412*4882a593Smuzhiyun 		pmatch[i] = readw(ioaddr + RxFilterData);
1413*4882a593Smuzhiyun 	}
1414*4882a593Smuzhiyun 	/* SOPAS */
1415*4882a593Smuzhiyun 	for (i = 0; i < 3; i++) {
1416*4882a593Smuzhiyun 		writel(0xa+(i*2), ioaddr + RxFilterAddr);
1417*4882a593Smuzhiyun 		sopass[i] = readw(ioaddr + RxFilterData);
1418*4882a593Smuzhiyun 	}
1419*4882a593Smuzhiyun 
1420*4882a593Smuzhiyun 	/* now whack the chip */
1421*4882a593Smuzhiyun 	writel(ChipReset, ioaddr + ChipCmd);
1422*4882a593Smuzhiyun 	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1423*4882a593Smuzhiyun 		if (!(readl(ioaddr + ChipCmd) & ChipReset))
1424*4882a593Smuzhiyun 			break;
1425*4882a593Smuzhiyun 		udelay(5);
1426*4882a593Smuzhiyun 	}
1427*4882a593Smuzhiyun 	if (i==NATSEMI_HW_TIMEOUT) {
1428*4882a593Smuzhiyun 		printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
1429*4882a593Smuzhiyun 			dev->name, i*5);
1430*4882a593Smuzhiyun 	} else if (netif_msg_hw(np)) {
1431*4882a593Smuzhiyun 		printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
1432*4882a593Smuzhiyun 			dev->name, i*5);
1433*4882a593Smuzhiyun 	}
1434*4882a593Smuzhiyun 
1435*4882a593Smuzhiyun 	/* restore CFG */
1436*4882a593Smuzhiyun 	cfg |= readl(ioaddr + ChipConfig) & ~CFG_RESET_SAVE;
1437*4882a593Smuzhiyun 	/* turn on external phy if it was selected */
1438*4882a593Smuzhiyun 	if (dev->if_port == PORT_TP)
1439*4882a593Smuzhiyun 		cfg &= ~(CfgExtPhy | CfgPhyDis);
1440*4882a593Smuzhiyun 	else
1441*4882a593Smuzhiyun 		cfg |= (CfgExtPhy | CfgPhyDis);
1442*4882a593Smuzhiyun 	writel(cfg, ioaddr + ChipConfig);
1443*4882a593Smuzhiyun 	/* restore WCSR */
1444*4882a593Smuzhiyun 	wcsr |= readl(ioaddr + WOLCmd) & ~WCSR_RESET_SAVE;
1445*4882a593Smuzhiyun 	writel(wcsr, ioaddr + WOLCmd);
1446*4882a593Smuzhiyun 	/* read RFCR */
1447*4882a593Smuzhiyun 	rfcr |= readl(ioaddr + RxFilterAddr) & ~RFCR_RESET_SAVE;
1448*4882a593Smuzhiyun 	/* restore PMATCH */
1449*4882a593Smuzhiyun 	for (i = 0; i < 3; i++) {
1450*4882a593Smuzhiyun 		writel(i*2, ioaddr + RxFilterAddr);
1451*4882a593Smuzhiyun 		writew(pmatch[i], ioaddr + RxFilterData);
1452*4882a593Smuzhiyun 	}
1453*4882a593Smuzhiyun 	for (i = 0; i < 3; i++) {
1454*4882a593Smuzhiyun 		writel(0xa+(i*2), ioaddr + RxFilterAddr);
1455*4882a593Smuzhiyun 		writew(sopass[i], ioaddr + RxFilterData);
1456*4882a593Smuzhiyun 	}
1457*4882a593Smuzhiyun 	/* restore RFCR */
1458*4882a593Smuzhiyun 	writel(rfcr, ioaddr + RxFilterAddr);
1459*4882a593Smuzhiyun }
1460*4882a593Smuzhiyun 
1461*4882a593Smuzhiyun static void reset_rx(struct net_device *dev)
1462*4882a593Smuzhiyun {
1463*4882a593Smuzhiyun 	int i;
1464*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1465*4882a593Smuzhiyun 	void __iomem *ioaddr = ns_ioaddr(dev);
1466*4882a593Smuzhiyun 
1467*4882a593Smuzhiyun 	np->intr_status &= ~RxResetDone;
1468*4882a593Smuzhiyun 
1469*4882a593Smuzhiyun 	writel(RxReset, ioaddr + ChipCmd);
1470*4882a593Smuzhiyun 
1471*4882a593Smuzhiyun 	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1472*4882a593Smuzhiyun 		np->intr_status |= readl(ioaddr + IntrStatus);
1473*4882a593Smuzhiyun 		if (np->intr_status & RxResetDone)
1474*4882a593Smuzhiyun 			break;
1475*4882a593Smuzhiyun 		udelay(15);
1476*4882a593Smuzhiyun 	}
1477*4882a593Smuzhiyun 	if (i==NATSEMI_HW_TIMEOUT) {
1478*4882a593Smuzhiyun 		printk(KERN_WARNING "%s: RX reset did not complete in %d usec.\n",
1479*4882a593Smuzhiyun 		       dev->name, i*15);
1480*4882a593Smuzhiyun 	} else if (netif_msg_hw(np)) {
1481*4882a593Smuzhiyun 		printk(KERN_DEBUG "%s: RX reset took %d usec.\n",
1482*4882a593Smuzhiyun 		       dev->name, i*15);
1483*4882a593Smuzhiyun 	}
1484*4882a593Smuzhiyun }
1485*4882a593Smuzhiyun 
1486*4882a593Smuzhiyun static void natsemi_reload_eeprom(struct net_device *dev)
1487*4882a593Smuzhiyun {
1488*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1489*4882a593Smuzhiyun 	void __iomem *ioaddr = ns_ioaddr(dev);
1490*4882a593Smuzhiyun 	int i;
1491*4882a593Smuzhiyun 
1492*4882a593Smuzhiyun 	writel(EepromReload, ioaddr + PCIBusCfg);
1493*4882a593Smuzhiyun 	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1494*4882a593Smuzhiyun 		udelay(50);
1495*4882a593Smuzhiyun 		if (!(readl(ioaddr + PCIBusCfg) & EepromReload))
1496*4882a593Smuzhiyun 			break;
1497*4882a593Smuzhiyun 	}
1498*4882a593Smuzhiyun 	if (i==NATSEMI_HW_TIMEOUT) {
1499*4882a593Smuzhiyun 		printk(KERN_WARNING "natsemi %s: EEPROM did not reload in %d usec.\n",
1500*4882a593Smuzhiyun 			pci_name(np->pci_dev), i*50);
1501*4882a593Smuzhiyun 	} else if (netif_msg_hw(np)) {
1502*4882a593Smuzhiyun 		printk(KERN_DEBUG "natsemi %s: EEPROM reloaded in %d usec.\n",
1503*4882a593Smuzhiyun 			pci_name(np->pci_dev), i*50);
1504*4882a593Smuzhiyun 	}
1505*4882a593Smuzhiyun }
1506*4882a593Smuzhiyun 
1507*4882a593Smuzhiyun static void natsemi_stop_rxtx(struct net_device *dev)
1508*4882a593Smuzhiyun {
1509*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
1510*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1511*4882a593Smuzhiyun 	int i;
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun 	writel(RxOff | TxOff, ioaddr + ChipCmd);
1514*4882a593Smuzhiyun 	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
1515*4882a593Smuzhiyun 		if ((readl(ioaddr + ChipCmd) & (TxOn|RxOn)) == 0)
1516*4882a593Smuzhiyun 			break;
1517*4882a593Smuzhiyun 		udelay(5);
1518*4882a593Smuzhiyun 	}
1519*4882a593Smuzhiyun 	if (i==NATSEMI_HW_TIMEOUT) {
1520*4882a593Smuzhiyun 		printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
1521*4882a593Smuzhiyun 			dev->name, i*5);
1522*4882a593Smuzhiyun 	} else if (netif_msg_hw(np)) {
1523*4882a593Smuzhiyun 		printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
1524*4882a593Smuzhiyun 			dev->name, i*5);
1525*4882a593Smuzhiyun 	}
1526*4882a593Smuzhiyun }
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun static int netdev_open(struct net_device *dev)
1529*4882a593Smuzhiyun {
1530*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1531*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
1532*4882a593Smuzhiyun 	const int irq = np->pci_dev->irq;
1533*4882a593Smuzhiyun 	int i;
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun 	/* Reset the chip, just in case. */
1536*4882a593Smuzhiyun 	natsemi_reset(dev);
1537*4882a593Smuzhiyun 
1538*4882a593Smuzhiyun 	i = request_irq(irq, intr_handler, IRQF_SHARED, dev->name, dev);
1539*4882a593Smuzhiyun 	if (i) return i;
1540*4882a593Smuzhiyun 
1541*4882a593Smuzhiyun 	if (netif_msg_ifup(np))
1542*4882a593Smuzhiyun 		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
1543*4882a593Smuzhiyun 			dev->name, irq);
1544*4882a593Smuzhiyun 	i = alloc_ring(dev);
1545*4882a593Smuzhiyun 	if (i < 0) {
1546*4882a593Smuzhiyun 		free_irq(irq, dev);
1547*4882a593Smuzhiyun 		return i;
1548*4882a593Smuzhiyun 	}
1549*4882a593Smuzhiyun 	napi_enable(&np->napi);
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun 	init_ring(dev);
1552*4882a593Smuzhiyun 	spin_lock_irq(&np->lock);
1553*4882a593Smuzhiyun 	init_registers(dev);
1554*4882a593Smuzhiyun 	/* now set the MAC address according to dev->dev_addr */
1555*4882a593Smuzhiyun 	for (i = 0; i < 3; i++) {
1556*4882a593Smuzhiyun 		u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];
1557*4882a593Smuzhiyun 
1558*4882a593Smuzhiyun 		writel(i*2, ioaddr + RxFilterAddr);
1559*4882a593Smuzhiyun 		writew(mac, ioaddr + RxFilterData);
1560*4882a593Smuzhiyun 	}
1561*4882a593Smuzhiyun 	writel(np->cur_rx_mode, ioaddr + RxFilterAddr);
1562*4882a593Smuzhiyun 	spin_unlock_irq(&np->lock);
1563*4882a593Smuzhiyun 
1564*4882a593Smuzhiyun 	netif_start_queue(dev);
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun 	if (netif_msg_ifup(np))
1567*4882a593Smuzhiyun 		printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
1568*4882a593Smuzhiyun 			dev->name, (int)readl(ioaddr + ChipCmd));
1569*4882a593Smuzhiyun 
1570*4882a593Smuzhiyun 	/* Set the timer to check for link beat. */
1571*4882a593Smuzhiyun 	timer_setup(&np->timer, netdev_timer, 0);
1572*4882a593Smuzhiyun 	np->timer.expires = round_jiffies(jiffies + NATSEMI_TIMER_FREQ);
1573*4882a593Smuzhiyun 	add_timer(&np->timer);
1574*4882a593Smuzhiyun 
1575*4882a593Smuzhiyun 	return 0;
1576*4882a593Smuzhiyun }
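
/*
 * Editorial worked example (not in the original source): the RxFilterData
 * writes in netdev_open() store the station address as three little-endian
 * 16-bit words.  For a dev_addr of 00:11:22:33:44:55 the loop writes
 *	filter addr 0: (0x11 << 8) | 0x00 = 0x1100
 *	filter addr 2: (0x33 << 8) | 0x22 = 0x3322
 *	filter addr 4: (0x55 << 8) | 0x44 = 0x5544
 */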
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun static void do_cable_magic(struct net_device *dev)
1579*4882a593Smuzhiyun {
1580*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1581*4882a593Smuzhiyun 	void __iomem *ioaddr = ns_ioaddr(dev);
1582*4882a593Smuzhiyun 
1583*4882a593Smuzhiyun 	if (dev->if_port != PORT_TP)
1584*4882a593Smuzhiyun 		return;
1585*4882a593Smuzhiyun 
1586*4882a593Smuzhiyun 	if (np->srr >= SRR_DP83816_A5)
1587*4882a593Smuzhiyun 		return;
1588*4882a593Smuzhiyun 
1589*4882a593Smuzhiyun 	/*
1590*4882a593Smuzhiyun 	 * 100 MBit links with short cables can trip an issue with the chip.
1591*4882a593Smuzhiyun 	 * The problem manifests as lots of CRC errors and/or flickering
1592*4882a593Smuzhiyun 	 * activity LED while idle.  This process is based on instructions
1593*4882a593Smuzhiyun 	 * from engineers at National.
1594*4882a593Smuzhiyun 	 */
1595*4882a593Smuzhiyun 	if (readl(ioaddr + ChipConfig) & CfgSpeed100) {
1596*4882a593Smuzhiyun 		u16 data;
1597*4882a593Smuzhiyun 
1598*4882a593Smuzhiyun 		writew(1, ioaddr + PGSEL);
1599*4882a593Smuzhiyun 		/*
1600*4882a593Smuzhiyun 		 * coefficient visibility should already be enabled via
1601*4882a593Smuzhiyun 		 * DSPCFG | 0x1000
1602*4882a593Smuzhiyun 		 */
1603*4882a593Smuzhiyun 		data = readw(ioaddr + TSTDAT) & 0xff;
1604*4882a593Smuzhiyun 		/*
1605*4882a593Smuzhiyun 		 * the value must be negative, and within certain values
1606*4882a593Smuzhiyun 		 * (these values all come from National)
1607*4882a593Smuzhiyun 		 */
1608*4882a593Smuzhiyun 		if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
1611*4882a593Smuzhiyun 			/* the bug has been triggered - fix the coefficient */
1612*4882a593Smuzhiyun 			writew(TSTDAT_FIXED, ioaddr + TSTDAT);
1613*4882a593Smuzhiyun 			/* lock the value */
1614*4882a593Smuzhiyun 			data = readw(ioaddr + DSPCFG);
1615*4882a593Smuzhiyun 			np->dspcfg = data | DSPCFG_LOCK;
1616*4882a593Smuzhiyun 			writew(np->dspcfg, ioaddr + DSPCFG);
1617*4882a593Smuzhiyun 		}
1618*4882a593Smuzhiyun 		writew(0, ioaddr + PGSEL);
1619*4882a593Smuzhiyun 	}
1620*4882a593Smuzhiyun }
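
/*
 * Editorial note (not in the original source): the TSTDAT test above
 * treats the low byte as a signed 8-bit DSP coefficient.  "!(data & 0x80)"
 * matches values >= 0 and "0xd8..0xff" matches -40..-1, so the workaround
 * fires whenever the coefficient is not more negative than -40: e.g.
 * 0xe0 (-32) trips it, while 0xb0 (-80) is left alone.
 */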
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun static void undo_cable_magic(struct net_device *dev)
1623*4882a593Smuzhiyun {
1624*4882a593Smuzhiyun 	u16 data;
1625*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1626*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun 	if (dev->if_port != PORT_TP)
1629*4882a593Smuzhiyun 		return;
1630*4882a593Smuzhiyun 
1631*4882a593Smuzhiyun 	if (np->srr >= SRR_DP83816_A5)
1632*4882a593Smuzhiyun 		return;
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun 	writew(1, ioaddr + PGSEL);
1635*4882a593Smuzhiyun 	/* make sure the lock bit is clear */
1636*4882a593Smuzhiyun 	data = readw(ioaddr + DSPCFG);
1637*4882a593Smuzhiyun 	np->dspcfg = data & ~DSPCFG_LOCK;
1638*4882a593Smuzhiyun 	writew(np->dspcfg, ioaddr + DSPCFG);
1639*4882a593Smuzhiyun 	writew(0, ioaddr + PGSEL);
1640*4882a593Smuzhiyun }
1641*4882a593Smuzhiyun 
1642*4882a593Smuzhiyun static void check_link(struct net_device *dev)
1643*4882a593Smuzhiyun {
1644*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1645*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
1646*4882a593Smuzhiyun 	int duplex = np->duplex;
1647*4882a593Smuzhiyun 	u16 bmsr;
1648*4882a593Smuzhiyun 
1649*4882a593Smuzhiyun 	/* If we are ignoring the PHY then don't try reading it. */
1650*4882a593Smuzhiyun 	if (np->ignore_phy)
1651*4882a593Smuzhiyun 		goto propagate_state;
1652*4882a593Smuzhiyun 
1653*4882a593Smuzhiyun 	/* The link status field is latched: it remains low after a temporary
1654*4882a593Smuzhiyun 	 * link failure until it's read. We need the current link status,
1655*4882a593Smuzhiyun 	 * thus read twice.
1656*4882a593Smuzhiyun 	 */
1657*4882a593Smuzhiyun 	mdio_read(dev, MII_BMSR);
1658*4882a593Smuzhiyun 	bmsr = mdio_read(dev, MII_BMSR);
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun 	if (!(bmsr & BMSR_LSTATUS)) {
1661*4882a593Smuzhiyun 		if (netif_carrier_ok(dev)) {
1662*4882a593Smuzhiyun 			if (netif_msg_link(np))
1663*4882a593Smuzhiyun 				printk(KERN_NOTICE "%s: link down.\n",
1664*4882a593Smuzhiyun 				       dev->name);
1665*4882a593Smuzhiyun 			netif_carrier_off(dev);
1666*4882a593Smuzhiyun 			undo_cable_magic(dev);
1667*4882a593Smuzhiyun 		}
1668*4882a593Smuzhiyun 		return;
1669*4882a593Smuzhiyun 	}
1670*4882a593Smuzhiyun 	if (!netif_carrier_ok(dev)) {
1671*4882a593Smuzhiyun 		if (netif_msg_link(np))
1672*4882a593Smuzhiyun 			printk(KERN_NOTICE "%s: link up.\n", dev->name);
1673*4882a593Smuzhiyun 		netif_carrier_on(dev);
1674*4882a593Smuzhiyun 		do_cable_magic(dev);
1675*4882a593Smuzhiyun 	}
1676*4882a593Smuzhiyun 
1677*4882a593Smuzhiyun 	duplex = np->full_duplex;
1678*4882a593Smuzhiyun 	if (!duplex) {
1679*4882a593Smuzhiyun 		if (bmsr & BMSR_ANEGCOMPLETE) {
1680*4882a593Smuzhiyun 			int tmp = mii_nway_result(
1681*4882a593Smuzhiyun 				np->advertising & mdio_read(dev, MII_LPA));
1682*4882a593Smuzhiyun 			if (tmp == LPA_100FULL || tmp == LPA_10FULL)
1683*4882a593Smuzhiyun 				duplex = 1;
1684*4882a593Smuzhiyun 		} else if (mdio_read(dev, MII_BMCR) & BMCR_FULLDPLX)
1685*4882a593Smuzhiyun 			duplex = 1;
1686*4882a593Smuzhiyun 	}
1687*4882a593Smuzhiyun 
1688*4882a593Smuzhiyun propagate_state:
1689*4882a593Smuzhiyun 	/* if duplex is set then bit 28 must be set, too */
1690*4882a593Smuzhiyun 	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
1691*4882a593Smuzhiyun 		if (netif_msg_link(np))
1692*4882a593Smuzhiyun 			printk(KERN_INFO
1693*4882a593Smuzhiyun 				"%s: Setting %s-duplex based on negotiated "
1694*4882a593Smuzhiyun 				"link capability.\n", dev->name,
1695*4882a593Smuzhiyun 				duplex ? "full" : "half");
1696*4882a593Smuzhiyun 		if (duplex) {
1697*4882a593Smuzhiyun 			np->rx_config |= RxAcceptTx;
1698*4882a593Smuzhiyun 			np->tx_config |= TxCarrierIgn | TxHeartIgn;
1699*4882a593Smuzhiyun 		} else {
1700*4882a593Smuzhiyun 			np->rx_config &= ~RxAcceptTx;
1701*4882a593Smuzhiyun 			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
1702*4882a593Smuzhiyun 		}
1703*4882a593Smuzhiyun 		writel(np->tx_config, ioaddr + TxConfig);
1704*4882a593Smuzhiyun 		writel(np->rx_config, ioaddr + RxConfig);
1705*4882a593Smuzhiyun 	}
1706*4882a593Smuzhiyun }
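
/*
 * Editorial worked example (not in the original source): mii_nway_result()
 * takes the AND of our advertisement and the partner's LPA word and
 * returns the highest-priority common mode.  If we advertise 100 and 10 at
 * both duplexes but the partner only offers LPA_100HALF | LPA_10FULL,
 * the common set resolves to LPA_100HALF, so check_link() leaves the
 * link at half duplex even though the partner could have done 10FULL.
 */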
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun static void init_registers(struct net_device *dev)
1709*4882a593Smuzhiyun {
1710*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1711*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
1712*4882a593Smuzhiyun 
1713*4882a593Smuzhiyun 	init_phy_fixup(dev);
1714*4882a593Smuzhiyun 
1715*4882a593Smuzhiyun 	/* clear any interrupts that are pending, such as wake events */
1716*4882a593Smuzhiyun 	readl(ioaddr + IntrStatus);
1717*4882a593Smuzhiyun 
1718*4882a593Smuzhiyun 	writel(np->ring_dma, ioaddr + RxRingPtr);
1719*4882a593Smuzhiyun 	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
1720*4882a593Smuzhiyun 		ioaddr + TxRingPtr);
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	/* Initialize other registers.
1723*4882a593Smuzhiyun 	 * Configure the PCI bus bursts and FIFO thresholds.
1724*4882a593Smuzhiyun 	 * Configure for standard, in-spec Ethernet.
1725*4882a593Smuzhiyun 	 * Start with half-duplex. check_link will update
1726*4882a593Smuzhiyun 	 * to the correct settings.
1727*4882a593Smuzhiyun 	 */
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 	/* DRTH: 2: start tx if 64 bytes are in the fifo
1730*4882a593Smuzhiyun 	 * FLTH: 0x10: refill with next packet if 512 bytes are free
1731*4882a593Smuzhiyun 	 * MXDMA: 0: up to 256 byte bursts.
1732*4882a593Smuzhiyun 	 * 	MXDMA must be <= FLTH
1733*4882a593Smuzhiyun 	 * ECRETRY=1
1734*4882a593Smuzhiyun 	 * ATP=1
1735*4882a593Smuzhiyun 	 */
1736*4882a593Smuzhiyun 	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 |
1737*4882a593Smuzhiyun 				TX_FLTH_VAL | TX_DRTH_VAL_START;
1738*4882a593Smuzhiyun 	writel(np->tx_config, ioaddr + TxConfig);
1739*4882a593Smuzhiyun 
1740*4882a593Smuzhiyun 	/* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
1741*4882a593Smuzhiyun 	 * MXDMA 0: up to 256 byte bursts
1742*4882a593Smuzhiyun 	 */
1743*4882a593Smuzhiyun 	np->rx_config = RxMxdma_256 | RX_DRTH_VAL;
1744*4882a593Smuzhiyun 	/* if receive ring now has bigger buffers than normal, enable jumbo */
1745*4882a593Smuzhiyun 	if (np->rx_buf_sz > NATSEMI_LONGPKT)
1746*4882a593Smuzhiyun 		np->rx_config |= RxAcceptLong;
1747*4882a593Smuzhiyun 
1748*4882a593Smuzhiyun 	writel(np->rx_config, ioaddr + RxConfig);
1749*4882a593Smuzhiyun 
1750*4882a593Smuzhiyun 	/* Disable PME:
1751*4882a593Smuzhiyun 	 * The PME bit is initialized from the EEPROM contents.
1752*4882a593Smuzhiyun 	 * PCI cards probably have PME disabled, but motherboard
1753*4882a593Smuzhiyun 	 * implementations may have PME set to enable WakeOnLan.
1754*4882a593Smuzhiyun 	 * With PME set the chip will scan incoming packets but
1755*4882a593Smuzhiyun 	 * nothing will be written to memory. */
1756*4882a593Smuzhiyun 	np->SavedClkRun = readl(ioaddr + ClkRun);
1757*4882a593Smuzhiyun 	writel(np->SavedClkRun & ~PMEEnable, ioaddr + ClkRun);
1758*4882a593Smuzhiyun 	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
1759*4882a593Smuzhiyun 		printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
1760*4882a593Smuzhiyun 			dev->name, readl(ioaddr + WOLCmd));
1761*4882a593Smuzhiyun 	}
1762*4882a593Smuzhiyun 
1763*4882a593Smuzhiyun 	check_link(dev);
1764*4882a593Smuzhiyun 	__set_rx_mode(dev);
1765*4882a593Smuzhiyun 
1766*4882a593Smuzhiyun 	/* Enable interrupts by setting the interrupt mask. */
1767*4882a593Smuzhiyun 	writel(DEFAULT_INTR, ioaddr + IntrMask);
1768*4882a593Smuzhiyun 	natsemi_irq_enable(dev);
1769*4882a593Smuzhiyun 
1770*4882a593Smuzhiyun 	writel(RxOn | TxOn, ioaddr + ChipCmd);
1771*4882a593Smuzhiyun 	writel(StatsClear, ioaddr + StatsCtrl); /* Clear Stats */
1772*4882a593Smuzhiyun }
1773*4882a593Smuzhiyun 
1774*4882a593Smuzhiyun /*
1775*4882a593Smuzhiyun  * netdev_timer:
1776*4882a593Smuzhiyun  * Purpose:
1777*4882a593Smuzhiyun  * 1) check for link changes. Usually they are handled by the MII interrupt
1778*4882a593Smuzhiyun  *    but it doesn't hurt to check twice.
1779*4882a593Smuzhiyun  * 2) check for sudden death of the NIC:
1780*4882a593Smuzhiyun  *    It seems that a reference set for this chip went out with incorrect info,
1781*4882a593Smuzhiyun  *    and there exist boards that aren't quite right.  An unexpected voltage
1782*4882a593Smuzhiyun  *    drop can cause the PHY to get itself in a weird state (basically reset).
1783*4882a593Smuzhiyun  *    NOTE: this only seems to affect revC chips.  The user can disable
1784*4882a593Smuzhiyun  *    this check via dspcfg_workaround sysfs option.
1785*4882a593Smuzhiyun  * 3) check for death of the RX path due to OOM
1786*4882a593Smuzhiyun  */
1787*4882a593Smuzhiyun static void netdev_timer(struct timer_list *t)
1788*4882a593Smuzhiyun {
1789*4882a593Smuzhiyun 	struct netdev_private *np = from_timer(np, t, timer);
1790*4882a593Smuzhiyun 	struct net_device *dev = np->dev;
1791*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
1792*4882a593Smuzhiyun 	int next_tick = NATSEMI_TIMER_FREQ;
1793*4882a593Smuzhiyun 	const int irq = np->pci_dev->irq;
1794*4882a593Smuzhiyun 
1795*4882a593Smuzhiyun 	if (netif_msg_timer(np)) {
1796*4882a593Smuzhiyun 		/* DO NOT read the IntrStatus register,
1797*4882a593Smuzhiyun 		 * a read clears any pending interrupts.
1798*4882a593Smuzhiyun 		 */
1799*4882a593Smuzhiyun 		printk(KERN_DEBUG "%s: Media selection timer tick.\n",
1800*4882a593Smuzhiyun 			dev->name);
1801*4882a593Smuzhiyun 	}
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun 	if (dev->if_port == PORT_TP) {
1804*4882a593Smuzhiyun 		u16 dspcfg;
1805*4882a593Smuzhiyun 
1806*4882a593Smuzhiyun 		spin_lock_irq(&np->lock);
1807*4882a593Smuzhiyun 		/* check for a nasty random phy-reset - use dspcfg as a flag */
1808*4882a593Smuzhiyun 		writew(1, ioaddr+PGSEL);
1809*4882a593Smuzhiyun 		dspcfg = readw(ioaddr+DSPCFG);
1810*4882a593Smuzhiyun 		writew(0, ioaddr+PGSEL);
1811*4882a593Smuzhiyun 		if (np->dspcfg_workaround && dspcfg != np->dspcfg) {
1812*4882a593Smuzhiyun 			if (!netif_queue_stopped(dev)) {
1813*4882a593Smuzhiyun 				spin_unlock_irq(&np->lock);
1814*4882a593Smuzhiyun 				if (netif_msg_drv(np))
1815*4882a593Smuzhiyun 					printk(KERN_NOTICE "%s: possible phy reset: "
1816*4882a593Smuzhiyun 						"re-initializing\n", dev->name);
1817*4882a593Smuzhiyun 				disable_irq(irq);
1818*4882a593Smuzhiyun 				spin_lock_irq(&np->lock);
1819*4882a593Smuzhiyun 				natsemi_stop_rxtx(dev);
1820*4882a593Smuzhiyun 				dump_ring(dev);
1821*4882a593Smuzhiyun 				reinit_ring(dev);
1822*4882a593Smuzhiyun 				init_registers(dev);
1823*4882a593Smuzhiyun 				spin_unlock_irq(&np->lock);
1824*4882a593Smuzhiyun 				enable_irq(irq);
1825*4882a593Smuzhiyun 			} else {
1826*4882a593Smuzhiyun 				/* hurry back */
1827*4882a593Smuzhiyun 				next_tick = HZ;
1828*4882a593Smuzhiyun 				spin_unlock_irq(&np->lock);
1829*4882a593Smuzhiyun 			}
1830*4882a593Smuzhiyun 		} else {
1831*4882a593Smuzhiyun 			/* init_registers() calls check_link() for the above case */
1832*4882a593Smuzhiyun 			check_link(dev);
1833*4882a593Smuzhiyun 			spin_unlock_irq(&np->lock);
1834*4882a593Smuzhiyun 		}
1835*4882a593Smuzhiyun 	} else {
1836*4882a593Smuzhiyun 		spin_lock_irq(&np->lock);
1837*4882a593Smuzhiyun 		check_link(dev);
1838*4882a593Smuzhiyun 		spin_unlock_irq(&np->lock);
1839*4882a593Smuzhiyun 	}
1840*4882a593Smuzhiyun 	if (np->oom) {
1841*4882a593Smuzhiyun 		disable_irq(irq);
1842*4882a593Smuzhiyun 		np->oom = 0;
1843*4882a593Smuzhiyun 		refill_rx(dev);
1844*4882a593Smuzhiyun 		enable_irq(irq);
1845*4882a593Smuzhiyun 		if (!np->oom) {
1846*4882a593Smuzhiyun 			writel(RxOn, ioaddr + ChipCmd);
1847*4882a593Smuzhiyun 		} else {
1848*4882a593Smuzhiyun 			next_tick = 1;
1849*4882a593Smuzhiyun 		}
1850*4882a593Smuzhiyun 	}
1851*4882a593Smuzhiyun 
1852*4882a593Smuzhiyun 	if (next_tick > 1)
1853*4882a593Smuzhiyun 		mod_timer(&np->timer, round_jiffies(jiffies + next_tick));
1854*4882a593Smuzhiyun 	else
1855*4882a593Smuzhiyun 		mod_timer(&np->timer, jiffies + next_tick);
1856*4882a593Smuzhiyun }
1857*4882a593Smuzhiyun 
1858*4882a593Smuzhiyun static void dump_ring(struct net_device *dev)
1859*4882a593Smuzhiyun {
1860*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1861*4882a593Smuzhiyun 
1862*4882a593Smuzhiyun 	if (netif_msg_pktdata(np)) {
1863*4882a593Smuzhiyun 		int i;
1864*4882a593Smuzhiyun 		printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
1865*4882a593Smuzhiyun 		for (i = 0; i < TX_RING_SIZE; i++) {
1866*4882a593Smuzhiyun 			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1867*4882a593Smuzhiyun 				i, np->tx_ring[i].next_desc,
1868*4882a593Smuzhiyun 				np->tx_ring[i].cmd_status,
1869*4882a593Smuzhiyun 				np->tx_ring[i].addr);
1870*4882a593Smuzhiyun 		}
1871*4882a593Smuzhiyun 		printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
1872*4882a593Smuzhiyun 		for (i = 0; i < RX_RING_SIZE; i++) {
1873*4882a593Smuzhiyun 			printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
1874*4882a593Smuzhiyun 				i, np->rx_ring[i].next_desc,
1875*4882a593Smuzhiyun 				np->rx_ring[i].cmd_status,
1876*4882a593Smuzhiyun 				np->rx_ring[i].addr);
1877*4882a593Smuzhiyun 		}
1878*4882a593Smuzhiyun 	}
1879*4882a593Smuzhiyun }
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun static void ns_tx_timeout(struct net_device *dev, unsigned int txqueue)
1882*4882a593Smuzhiyun {
1883*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1884*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
1885*4882a593Smuzhiyun 	const int irq = np->pci_dev->irq;
1886*4882a593Smuzhiyun 
1887*4882a593Smuzhiyun 	disable_irq(irq);
1888*4882a593Smuzhiyun 	spin_lock_irq(&np->lock);
1889*4882a593Smuzhiyun 	if (!np->hands_off) {
1890*4882a593Smuzhiyun 		if (netif_msg_tx_err(np))
1891*4882a593Smuzhiyun 			printk(KERN_WARNING
1892*4882a593Smuzhiyun 				"%s: Transmit timed out, status %#08x,"
1893*4882a593Smuzhiyun 				" resetting...\n",
1894*4882a593Smuzhiyun 				dev->name, readl(ioaddr + IntrStatus));
1895*4882a593Smuzhiyun 		dump_ring(dev);
1896*4882a593Smuzhiyun 
1897*4882a593Smuzhiyun 		natsemi_reset(dev);
1898*4882a593Smuzhiyun 		reinit_ring(dev);
1899*4882a593Smuzhiyun 		init_registers(dev);
1900*4882a593Smuzhiyun 	} else {
1901*4882a593Smuzhiyun 		printk(KERN_WARNING
1902*4882a593Smuzhiyun 			"%s: tx_timeout while in hands_off state?\n",
1903*4882a593Smuzhiyun 			dev->name);
1904*4882a593Smuzhiyun 	}
1905*4882a593Smuzhiyun 	spin_unlock_irq(&np->lock);
1906*4882a593Smuzhiyun 	enable_irq(irq);
1907*4882a593Smuzhiyun 
1908*4882a593Smuzhiyun 	netif_trans_update(dev); /* prevent tx timeout */
1909*4882a593Smuzhiyun 	dev->stats.tx_errors++;
1910*4882a593Smuzhiyun 	netif_wake_queue(dev);
1911*4882a593Smuzhiyun }
1912*4882a593Smuzhiyun 
1913*4882a593Smuzhiyun static int alloc_ring(struct net_device *dev)
1914*4882a593Smuzhiyun {
1915*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1916*4882a593Smuzhiyun 	np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
1917*4882a593Smuzhiyun 					 sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
1918*4882a593Smuzhiyun 					 &np->ring_dma, GFP_KERNEL);
1919*4882a593Smuzhiyun 	if (!np->rx_ring)
1920*4882a593Smuzhiyun 		return -ENOMEM;
1921*4882a593Smuzhiyun 	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
1922*4882a593Smuzhiyun 	return 0;
1923*4882a593Smuzhiyun }
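
/*
 * Editorial note (not in the original source): both rings live in one
 * coherent DMA allocation; rx_ring covers the first RX_RING_SIZE
 * descriptors and tx_ring aliases the remainder:
 *
 *	ring_dma -> [ rx desc 0 .. rx desc RX_RING_SIZE-1 ]
 *	            [ tx desc 0 .. tx desc TX_RING_SIZE-1 ]
 *
 * which is why init_registers() programs TxRingPtr as
 * ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc).
 */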
1924*4882a593Smuzhiyun 
1925*4882a593Smuzhiyun static void refill_rx(struct net_device *dev)
1926*4882a593Smuzhiyun {
1927*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1928*4882a593Smuzhiyun 
1929*4882a593Smuzhiyun 	/* Refill the Rx ring buffers. */
1930*4882a593Smuzhiyun 	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
1931*4882a593Smuzhiyun 		struct sk_buff *skb;
1932*4882a593Smuzhiyun 		int entry = np->dirty_rx % RX_RING_SIZE;
1933*4882a593Smuzhiyun 		if (np->rx_skbuff[entry] == NULL) {
1934*4882a593Smuzhiyun 			unsigned int buflen = np->rx_buf_sz+NATSEMI_PADDING;
1935*4882a593Smuzhiyun 			skb = netdev_alloc_skb(dev, buflen);
1936*4882a593Smuzhiyun 			np->rx_skbuff[entry] = skb;
1937*4882a593Smuzhiyun 			if (skb == NULL)
1938*4882a593Smuzhiyun 				break; /* Better luck next round. */
1939*4882a593Smuzhiyun 			np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev,
1940*4882a593Smuzhiyun 							   skb->data, buflen,
1941*4882a593Smuzhiyun 							   DMA_FROM_DEVICE);
1942*4882a593Smuzhiyun 			if (dma_mapping_error(&np->pci_dev->dev, np->rx_dma[entry])) {
1943*4882a593Smuzhiyun 				dev_kfree_skb_any(skb);
1944*4882a593Smuzhiyun 				np->rx_skbuff[entry] = NULL;
1945*4882a593Smuzhiyun 				break; /* Better luck next round. */
1946*4882a593Smuzhiyun 			}
1947*4882a593Smuzhiyun 			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
1948*4882a593Smuzhiyun 		}
1949*4882a593Smuzhiyun 		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
1950*4882a593Smuzhiyun 	}
1951*4882a593Smuzhiyun 	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
1952*4882a593Smuzhiyun 		if (netif_msg_rx_err(np))
1953*4882a593Smuzhiyun 			printk(KERN_WARNING "%s: going OOM.\n", dev->name);
1954*4882a593Smuzhiyun 		np->oom = 1;
1955*4882a593Smuzhiyun 	}
1956*4882a593Smuzhiyun }
1957*4882a593Smuzhiyun 
1958*4882a593Smuzhiyun static void set_bufsize(struct net_device *dev)
1959*4882a593Smuzhiyun {
1960*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1961*4882a593Smuzhiyun 	if (dev->mtu <= ETH_DATA_LEN)
1962*4882a593Smuzhiyun 		np->rx_buf_sz = ETH_DATA_LEN + NATSEMI_HEADERS;
1963*4882a593Smuzhiyun 	else
1964*4882a593Smuzhiyun 		np->rx_buf_sz = dev->mtu + NATSEMI_HEADERS;
1965*4882a593Smuzhiyun }
1966*4882a593Smuzhiyun 
1967*4882a593Smuzhiyun /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
1968*4882a593Smuzhiyun static void init_ring(struct net_device *dev)
1969*4882a593Smuzhiyun {
1970*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
1971*4882a593Smuzhiyun 	int i;
1972*4882a593Smuzhiyun 
1973*4882a593Smuzhiyun 	/* 1) TX ring */
1974*4882a593Smuzhiyun 	np->dirty_tx = np->cur_tx = 0;
1975*4882a593Smuzhiyun 	for (i = 0; i < TX_RING_SIZE; i++) {
1976*4882a593Smuzhiyun 		np->tx_skbuff[i] = NULL;
1977*4882a593Smuzhiyun 		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
1978*4882a593Smuzhiyun 			+sizeof(struct netdev_desc)
1979*4882a593Smuzhiyun 			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
1980*4882a593Smuzhiyun 		np->tx_ring[i].cmd_status = 0;
1981*4882a593Smuzhiyun 	}
1982*4882a593Smuzhiyun 
1983*4882a593Smuzhiyun 	/* 2) RX ring */
1984*4882a593Smuzhiyun 	np->dirty_rx = 0;
1985*4882a593Smuzhiyun 	np->cur_rx = RX_RING_SIZE;
1986*4882a593Smuzhiyun 	np->oom = 0;
1987*4882a593Smuzhiyun 	set_bufsize(dev);
1988*4882a593Smuzhiyun 
1989*4882a593Smuzhiyun 	np->rx_head_desc = &np->rx_ring[0];
1990*4882a593Smuzhiyun 
1991*4882a593Smuzhiyun 	/* Please be careful before changing this loop - at least gcc-2.95.1
1992*4882a593Smuzhiyun 	 * miscompiles it otherwise.
1993*4882a593Smuzhiyun 	 */
1994*4882a593Smuzhiyun 	/* Initialize all Rx descriptors. */
1995*4882a593Smuzhiyun 	for (i = 0; i < RX_RING_SIZE; i++) {
1996*4882a593Smuzhiyun 		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
1997*4882a593Smuzhiyun 				+sizeof(struct netdev_desc)
1998*4882a593Smuzhiyun 				*((i+1)%RX_RING_SIZE));
1999*4882a593Smuzhiyun 		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
2000*4882a593Smuzhiyun 		np->rx_skbuff[i] = NULL;
2001*4882a593Smuzhiyun 	}
2002*4882a593Smuzhiyun 	refill_rx(dev);
2003*4882a593Smuzhiyun 	dump_ring(dev);
2004*4882a593Smuzhiyun }
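
/*
 * Editorial worked example (not in the original source): the next_desc
 * links set up above make each ring circular within the shared block.
 * Tx descriptor i sits at slot RX_RING_SIZE + i and points at slot
 * ((i + 1) % TX_RING_SIZE) + RX_RING_SIZE, so the last tx descriptor
 * wraps back to the first; the rx ring wraps the same way within
 * slots 0 .. RX_RING_SIZE-1.
 */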
2005*4882a593Smuzhiyun 
2006*4882a593Smuzhiyun static void drain_tx(struct net_device *dev)
2007*4882a593Smuzhiyun {
2008*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2009*4882a593Smuzhiyun 	int i;
2010*4882a593Smuzhiyun 
2011*4882a593Smuzhiyun 	for (i = 0; i < TX_RING_SIZE; i++) {
2012*4882a593Smuzhiyun 		if (np->tx_skbuff[i]) {
2013*4882a593Smuzhiyun 			dma_unmap_single(&np->pci_dev->dev, np->tx_dma[i],
2014*4882a593Smuzhiyun 					 np->tx_skbuff[i]->len, DMA_TO_DEVICE);
2015*4882a593Smuzhiyun 			dev_kfree_skb(np->tx_skbuff[i]);
2016*4882a593Smuzhiyun 			dev->stats.tx_dropped++;
2017*4882a593Smuzhiyun 		}
2018*4882a593Smuzhiyun 		np->tx_skbuff[i] = NULL;
2019*4882a593Smuzhiyun 	}
2020*4882a593Smuzhiyun }
2021*4882a593Smuzhiyun 
2022*4882a593Smuzhiyun static void drain_rx(struct net_device *dev)
2023*4882a593Smuzhiyun {
2024*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2025*4882a593Smuzhiyun 	unsigned int buflen = np->rx_buf_sz;
2026*4882a593Smuzhiyun 	int i;
2027*4882a593Smuzhiyun 
2028*4882a593Smuzhiyun 	/* Free all the skbuffs in the Rx queue. */
2029*4882a593Smuzhiyun 	for (i = 0; i < RX_RING_SIZE; i++) {
2030*4882a593Smuzhiyun 		np->rx_ring[i].cmd_status = 0;
2031*4882a593Smuzhiyun 		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
2032*4882a593Smuzhiyun 		if (np->rx_skbuff[i]) {
2033*4882a593Smuzhiyun 			dma_unmap_single(&np->pci_dev->dev, np->rx_dma[i],
2034*4882a593Smuzhiyun 					 buflen + NATSEMI_PADDING,
2035*4882a593Smuzhiyun 					 DMA_FROM_DEVICE);
2036*4882a593Smuzhiyun 			dev_kfree_skb(np->rx_skbuff[i]);
2037*4882a593Smuzhiyun 		}
2038*4882a593Smuzhiyun 		np->rx_skbuff[i] = NULL;
2039*4882a593Smuzhiyun 	}
2040*4882a593Smuzhiyun }
2041*4882a593Smuzhiyun 
2042*4882a593Smuzhiyun static void drain_ring(struct net_device *dev)
2043*4882a593Smuzhiyun {
2044*4882a593Smuzhiyun 	drain_rx(dev);
2045*4882a593Smuzhiyun 	drain_tx(dev);
2046*4882a593Smuzhiyun }
2047*4882a593Smuzhiyun 
2048*4882a593Smuzhiyun static void free_ring(struct net_device *dev)
2049*4882a593Smuzhiyun {
2050*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2051*4882a593Smuzhiyun 	dma_free_coherent(&np->pci_dev->dev,
2052*4882a593Smuzhiyun 			  sizeof(struct netdev_desc) * (RX_RING_SIZE + TX_RING_SIZE),
2053*4882a593Smuzhiyun 			  np->rx_ring, np->ring_dma);
2054*4882a593Smuzhiyun }
2055*4882a593Smuzhiyun 
2056*4882a593Smuzhiyun static void reinit_rx(struct net_device *dev)
2057*4882a593Smuzhiyun {
2058*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2059*4882a593Smuzhiyun 	int i;
2060*4882a593Smuzhiyun 
2061*4882a593Smuzhiyun 	/* RX Ring */
2062*4882a593Smuzhiyun 	np->dirty_rx = 0;
2063*4882a593Smuzhiyun 	np->cur_rx = RX_RING_SIZE;
2064*4882a593Smuzhiyun 	np->rx_head_desc = &np->rx_ring[0];
2065*4882a593Smuzhiyun 	/* Initialize all Rx descriptors. */
2066*4882a593Smuzhiyun 	for (i = 0; i < RX_RING_SIZE; i++)
2067*4882a593Smuzhiyun 		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
2068*4882a593Smuzhiyun 
2069*4882a593Smuzhiyun 	refill_rx(dev);
2070*4882a593Smuzhiyun }
2071*4882a593Smuzhiyun 
2072*4882a593Smuzhiyun static void reinit_ring(struct net_device *dev)
2073*4882a593Smuzhiyun {
2074*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2075*4882a593Smuzhiyun 	int i;
2076*4882a593Smuzhiyun 
2077*4882a593Smuzhiyun 	/* drain TX ring */
2078*4882a593Smuzhiyun 	drain_tx(dev);
2079*4882a593Smuzhiyun 	np->dirty_tx = np->cur_tx = 0;
2080*4882a593Smuzhiyun 	for (i=0;i<TX_RING_SIZE;i++)
2081*4882a593Smuzhiyun 		np->tx_ring[i].cmd_status = 0;
2082*4882a593Smuzhiyun 
2083*4882a593Smuzhiyun 	reinit_rx(dev);
2084*4882a593Smuzhiyun }
2085*4882a593Smuzhiyun 
2086*4882a593Smuzhiyun static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
2087*4882a593Smuzhiyun {
2088*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2089*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
2090*4882a593Smuzhiyun 	unsigned entry;
2091*4882a593Smuzhiyun 	unsigned long flags;
2092*4882a593Smuzhiyun 
2093*4882a593Smuzhiyun 	/* Note: Ordering is important here, set the field with the
2094*4882a593Smuzhiyun 	   "ownership" bit last, and only then increment cur_tx. */
2095*4882a593Smuzhiyun 
2096*4882a593Smuzhiyun 	/* Calculate the next Tx descriptor entry. */
2097*4882a593Smuzhiyun 	entry = np->cur_tx % TX_RING_SIZE;
2098*4882a593Smuzhiyun 
2099*4882a593Smuzhiyun 	np->tx_skbuff[entry] = skb;
2100*4882a593Smuzhiyun 	np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev, skb->data,
2101*4882a593Smuzhiyun 					   skb->len, DMA_TO_DEVICE);
2102*4882a593Smuzhiyun 	if (dma_mapping_error(&np->pci_dev->dev, np->tx_dma[entry])) {
2103*4882a593Smuzhiyun 		np->tx_skbuff[entry] = NULL;
2104*4882a593Smuzhiyun 		dev_kfree_skb_irq(skb);
2105*4882a593Smuzhiyun 		dev->stats.tx_dropped++;
2106*4882a593Smuzhiyun 		return NETDEV_TX_OK;
2107*4882a593Smuzhiyun 	}
2108*4882a593Smuzhiyun 
2109*4882a593Smuzhiyun 	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
2110*4882a593Smuzhiyun 
2111*4882a593Smuzhiyun 	spin_lock_irqsave(&np->lock, flags);
2112*4882a593Smuzhiyun 
2113*4882a593Smuzhiyun 	if (!np->hands_off) {
2114*4882a593Smuzhiyun 		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
2115*4882a593Smuzhiyun 		/* StrongARM: Explicitly cache flush np->tx_ring and
2116*4882a593Smuzhiyun 		 * skb->data,skb->len. */
2117*4882a593Smuzhiyun 		wmb();
2118*4882a593Smuzhiyun 		np->cur_tx++;
2119*4882a593Smuzhiyun 		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
2120*4882a593Smuzhiyun 			netdev_tx_done(dev);
2121*4882a593Smuzhiyun 			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
2122*4882a593Smuzhiyun 				netif_stop_queue(dev);
2123*4882a593Smuzhiyun 		}
2124*4882a593Smuzhiyun 		/* Wake the potentially-idle transmit channel. */
2125*4882a593Smuzhiyun 		writel(TxOn, ioaddr + ChipCmd);
2126*4882a593Smuzhiyun 	} else {
2127*4882a593Smuzhiyun 		dev_kfree_skb_irq(skb);
2128*4882a593Smuzhiyun 		dev->stats.tx_dropped++;
2129*4882a593Smuzhiyun 	}
2130*4882a593Smuzhiyun 	spin_unlock_irqrestore(&np->lock, flags);
2131*4882a593Smuzhiyun 
2132*4882a593Smuzhiyun 	if (netif_msg_tx_queued(np)) {
2133*4882a593Smuzhiyun 		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
2134*4882a593Smuzhiyun 			dev->name, np->cur_tx, entry);
2135*4882a593Smuzhiyun 	}
2136*4882a593Smuzhiyun 	return NETDEV_TX_OK;
2137*4882a593Smuzhiyun }
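
/*
 * Editorial note (not in the original source): the wmb() in start_tx()
 * fences the cmd_status store that hands DescOwn to the NIC (and the
 * earlier addr store) before cur_tx advances and before the TxOn
 * doorbell write, so the DMA engine can never see the wake-up while the
 * descriptor contents are still in flight.  netdev_tx_done() is the
 * matching consumer: it tests DescOwn before reclaiming a slot.
 */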
2138*4882a593Smuzhiyun 
2139*4882a593Smuzhiyun static void netdev_tx_done(struct net_device *dev)
2140*4882a593Smuzhiyun {
2141*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2142*4882a593Smuzhiyun 
2143*4882a593Smuzhiyun 	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
2144*4882a593Smuzhiyun 		int entry = np->dirty_tx % TX_RING_SIZE;
2145*4882a593Smuzhiyun 		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
2146*4882a593Smuzhiyun 			break;
2147*4882a593Smuzhiyun 		if (netif_msg_tx_done(np))
2148*4882a593Smuzhiyun 			printk(KERN_DEBUG
2149*4882a593Smuzhiyun 				"%s: tx frame #%d finished, status %#08x.\n",
2150*4882a593Smuzhiyun 					dev->name, np->dirty_tx,
2151*4882a593Smuzhiyun 					le32_to_cpu(np->tx_ring[entry].cmd_status));
2152*4882a593Smuzhiyun 		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
2153*4882a593Smuzhiyun 			dev->stats.tx_packets++;
2154*4882a593Smuzhiyun 			dev->stats.tx_bytes += np->tx_skbuff[entry]->len;
2155*4882a593Smuzhiyun 		} else { /* Various Tx errors */
2156*4882a593Smuzhiyun 			int tx_status =
2157*4882a593Smuzhiyun 				le32_to_cpu(np->tx_ring[entry].cmd_status);
2158*4882a593Smuzhiyun 			if (tx_status & (DescTxAbort|DescTxExcColl))
2159*4882a593Smuzhiyun 				dev->stats.tx_aborted_errors++;
2160*4882a593Smuzhiyun 			if (tx_status & DescTxFIFO)
2161*4882a593Smuzhiyun 				dev->stats.tx_fifo_errors++;
2162*4882a593Smuzhiyun 			if (tx_status & DescTxCarrier)
2163*4882a593Smuzhiyun 				dev->stats.tx_carrier_errors++;
2164*4882a593Smuzhiyun 			if (tx_status & DescTxOOWCol)
2165*4882a593Smuzhiyun 				dev->stats.tx_window_errors++;
2166*4882a593Smuzhiyun 			dev->stats.tx_errors++;
2167*4882a593Smuzhiyun 		}
2168*4882a593Smuzhiyun 		dma_unmap_single(&np->pci_dev->dev, np->tx_dma[entry],
2169*4882a593Smuzhiyun 				 np->tx_skbuff[entry]->len, DMA_TO_DEVICE);
2170*4882a593Smuzhiyun 		/* Free the original skb. */
2171*4882a593Smuzhiyun 		dev_consume_skb_irq(np->tx_skbuff[entry]);
2172*4882a593Smuzhiyun 		np->tx_skbuff[entry] = NULL;
2173*4882a593Smuzhiyun 	}
2174*4882a593Smuzhiyun 	if (netif_queue_stopped(dev) &&
2175*4882a593Smuzhiyun 	    np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
2176*4882a593Smuzhiyun 		/* The ring is no longer full, wake queue. */
2177*4882a593Smuzhiyun 		netif_wake_queue(dev);
2178*4882a593Smuzhiyun 	}
2179*4882a593Smuzhiyun }
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun /* The interrupt handler doesn't actually handle interrupts itself; it
2182*4882a593Smuzhiyun  * schedules a NAPI poll if there is anything to do. */
2183*4882a593Smuzhiyun static irqreturn_t intr_handler(int irq, void *dev_instance)
2184*4882a593Smuzhiyun {
2185*4882a593Smuzhiyun 	struct net_device *dev = dev_instance;
2186*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2187*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
2188*4882a593Smuzhiyun 
2189*4882a593Smuzhiyun 	/* Reading IntrStatus automatically acknowledges it, so don't do
2190*4882a593Smuzhiyun 	 * that while interrupts are disabled (for example, while a
2191*4882a593Smuzhiyun 	 * poll is scheduled).  */
2192*4882a593Smuzhiyun 	if (np->hands_off || !readl(ioaddr + IntrEnable))
2193*4882a593Smuzhiyun 		return IRQ_NONE;
2194*4882a593Smuzhiyun 
2195*4882a593Smuzhiyun 	np->intr_status = readl(ioaddr + IntrStatus);
2196*4882a593Smuzhiyun 
2197*4882a593Smuzhiyun 	if (!np->intr_status)
2198*4882a593Smuzhiyun 		return IRQ_NONE;
2199*4882a593Smuzhiyun 
2200*4882a593Smuzhiyun 	if (netif_msg_intr(np))
2201*4882a593Smuzhiyun 		printk(KERN_DEBUG
2202*4882a593Smuzhiyun 		       "%s: Interrupt, status %#08x, mask %#08x.\n",
2203*4882a593Smuzhiyun 		       dev->name, np->intr_status,
2204*4882a593Smuzhiyun 		       readl(ioaddr + IntrMask));
2205*4882a593Smuzhiyun 
2206*4882a593Smuzhiyun 	prefetch(&np->rx_skbuff[np->cur_rx % RX_RING_SIZE]);
2207*4882a593Smuzhiyun 
2208*4882a593Smuzhiyun 	if (napi_schedule_prep(&np->napi)) {
2209*4882a593Smuzhiyun 		/* Disable interrupts and register for poll */
2210*4882a593Smuzhiyun 		natsemi_irq_disable(dev);
2211*4882a593Smuzhiyun 		__napi_schedule(&np->napi);
2212*4882a593Smuzhiyun 	} else
2213*4882a593Smuzhiyun 		printk(KERN_WARNING
2214*4882a593Smuzhiyun 		       "%s: Ignoring interrupt, status %#08x, mask %#08x.\n",
2215*4882a593Smuzhiyun 		       dev->name, np->intr_status,
2216*4882a593Smuzhiyun 		       readl(ioaddr + IntrMask));
2217*4882a593Smuzhiyun 
2218*4882a593Smuzhiyun 	return IRQ_HANDLED;
2219*4882a593Smuzhiyun }
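
/* The interrupt/poll handoff above follows the usual NAPI pattern: the
 * hard irq acknowledges IntrStatus (clear-on-read), masks chip interrupts
 * and schedules the softirq poll; natsemi_poll() below then loops on
 * IntrStatus and unmasks interrupts only once it reads back clear.
 * A rough sketch of the sequence (illustrative only):
 *
 *	irq:  np->intr_status = readl(ioaddr + IntrStatus);	// ack
 *	      natsemi_irq_disable(dev);
 *	      __napi_schedule(&np->napi);
 *	poll: while (np->intr_status) { rx/tx work; re-read IntrStatus; }
 *	      napi_complete_done(napi, work_done);
 *	      natsemi_irq_enable(dev);
 */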
2220*4882a593Smuzhiyun 
2221*4882a593Smuzhiyun /* This is the NAPI poll routine.  As well as the standard RX handling
2222*4882a593Smuzhiyun  * it also handles all other interrupts that the chip might raise.
2223*4882a593Smuzhiyun  */
2224*4882a593Smuzhiyun static int natsemi_poll(struct napi_struct *napi, int budget)
2225*4882a593Smuzhiyun {
2226*4882a593Smuzhiyun 	struct netdev_private *np = container_of(napi, struct netdev_private, napi);
2227*4882a593Smuzhiyun 	struct net_device *dev = np->dev;
2228*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
2229*4882a593Smuzhiyun 	int work_done = 0;
2230*4882a593Smuzhiyun 
2231*4882a593Smuzhiyun 	do {
2232*4882a593Smuzhiyun 		if (netif_msg_intr(np))
2233*4882a593Smuzhiyun 			printk(KERN_DEBUG
2234*4882a593Smuzhiyun 			       "%s: Poll, status %#08x, mask %#08x.\n",
2235*4882a593Smuzhiyun 			       dev->name, np->intr_status,
2236*4882a593Smuzhiyun 			       readl(ioaddr + IntrMask));
2237*4882a593Smuzhiyun 
2238*4882a593Smuzhiyun 		/* netdev_rx() may read IntrStatus again if the RX state
2239*4882a593Smuzhiyun 		 * machine falls over, so do it first. */
2240*4882a593Smuzhiyun 		if (np->intr_status &
2241*4882a593Smuzhiyun 		    (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
2242*4882a593Smuzhiyun 		     IntrRxErr | IntrRxOverrun)) {
2243*4882a593Smuzhiyun 			netdev_rx(dev, &work_done, budget);
2244*4882a593Smuzhiyun 		}
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun 		if (np->intr_status &
2247*4882a593Smuzhiyun 		    (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
2248*4882a593Smuzhiyun 			spin_lock(&np->lock);
2249*4882a593Smuzhiyun 			netdev_tx_done(dev);
2250*4882a593Smuzhiyun 			spin_unlock(&np->lock);
2251*4882a593Smuzhiyun 		}
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 		/* Abnormal error summary/uncommon events handlers. */
2254*4882a593Smuzhiyun 		if (np->intr_status & IntrAbnormalSummary)
2255*4882a593Smuzhiyun 			netdev_error(dev, np->intr_status);
2256*4882a593Smuzhiyun 
2257*4882a593Smuzhiyun 		if (work_done >= budget)
2258*4882a593Smuzhiyun 			return work_done;
2259*4882a593Smuzhiyun 
2260*4882a593Smuzhiyun 		np->intr_status = readl(ioaddr + IntrStatus);
2261*4882a593Smuzhiyun 	} while (np->intr_status);
2262*4882a593Smuzhiyun 
2263*4882a593Smuzhiyun 	napi_complete_done(napi, work_done);
2264*4882a593Smuzhiyun 
2265*4882a593Smuzhiyun 	/* Reenable interrupts providing nothing is trying to shut
2266*4882a593Smuzhiyun 	 * the chip down. */
2267*4882a593Smuzhiyun 	spin_lock(&np->lock);
2268*4882a593Smuzhiyun 	if (!np->hands_off)
2269*4882a593Smuzhiyun 		natsemi_irq_enable(dev);
2270*4882a593Smuzhiyun 	spin_unlock(&np->lock);
2271*4882a593Smuzhiyun 
2272*4882a593Smuzhiyun 	return work_done;
2273*4882a593Smuzhiyun }
2274*4882a593Smuzhiyun 
2275*4882a593Smuzhiyun /* This routine is logically part of the interrupt handler, but separated
2276*4882a593Smuzhiyun    for clarity and better register allocation. */
2277*4882a593Smuzhiyun static void netdev_rx(struct net_device *dev, int *work_done, int work_to_do)
2278*4882a593Smuzhiyun {
2279*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2280*4882a593Smuzhiyun 	int entry = np->cur_rx % RX_RING_SIZE;
2281*4882a593Smuzhiyun 	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
2282*4882a593Smuzhiyun 	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
2283*4882a593Smuzhiyun 	unsigned int buflen = np->rx_buf_sz;
2284*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
2285*4882a593Smuzhiyun 
2286*4882a593Smuzhiyun 	/* If the driver owns the next entry it's a new packet. Send it up. */
2287*4882a593Smuzhiyun 	while (desc_status < 0) { /* sign bit set, i.e. DescOwn */
2288*4882a593Smuzhiyun 		int pkt_len;
2289*4882a593Smuzhiyun 		if (netif_msg_rx_status(np))
2290*4882a593Smuzhiyun 			printk(KERN_DEBUG
2291*4882a593Smuzhiyun 				"  netdev_rx() entry %d status was %#08x.\n",
2292*4882a593Smuzhiyun 				entry, desc_status);
2293*4882a593Smuzhiyun 		if (--boguscnt < 0)
2294*4882a593Smuzhiyun 			break;
2295*4882a593Smuzhiyun 
2296*4882a593Smuzhiyun 		if (*work_done >= work_to_do)
2297*4882a593Smuzhiyun 			break;
2298*4882a593Smuzhiyun 
2299*4882a593Smuzhiyun 		(*work_done)++;
2300*4882a593Smuzhiyun 
2301*4882a593Smuzhiyun 		pkt_len = (desc_status & DescSizeMask) - 4;
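		/* DescSizeMask yields the byte count including the 4-byte
		 * Ethernet FCS/CRC appended by the chip; subtract it here. */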
2302*4882a593Smuzhiyun 		if ((desc_status & (DescMore|DescPktOK|DescRxLong)) != DescPktOK) {
2303*4882a593Smuzhiyun 			if (desc_status & DescMore) {
2304*4882a593Smuzhiyun 				unsigned long flags;
2305*4882a593Smuzhiyun 
2306*4882a593Smuzhiyun 				if (netif_msg_rx_err(np))
2307*4882a593Smuzhiyun 					printk(KERN_WARNING
2308*4882a593Smuzhiyun 						"%s: Oversized(?) Ethernet "
2309*4882a593Smuzhiyun 						"frame spanned multiple "
2310*4882a593Smuzhiyun 						"buffers, entry %#08x "
2311*4882a593Smuzhiyun 						"status %#08x.\n", dev->name,
2312*4882a593Smuzhiyun 						np->cur_rx, desc_status);
2313*4882a593Smuzhiyun 				dev->stats.rx_length_errors++;
2314*4882a593Smuzhiyun 
2315*4882a593Smuzhiyun 				/* The RX state machine has probably
2316*4882a593Smuzhiyun 				 * locked up beneath us.  Follow the
2317*4882a593Smuzhiyun 				 * reset procedure documented in
2318*4882a593Smuzhiyun 				 * AN-1287. */
2319*4882a593Smuzhiyun 
2320*4882a593Smuzhiyun 				spin_lock_irqsave(&np->lock, flags);
2321*4882a593Smuzhiyun 				reset_rx(dev);
2322*4882a593Smuzhiyun 				reinit_rx(dev);
2323*4882a593Smuzhiyun 				writel(np->ring_dma, ioaddr + RxRingPtr);
2324*4882a593Smuzhiyun 				check_link(dev);
2325*4882a593Smuzhiyun 				spin_unlock_irqrestore(&np->lock, flags);
2326*4882a593Smuzhiyun 
2327*4882a593Smuzhiyun 				/* We'll enable RX on exit from this
2328*4882a593Smuzhiyun 				 * function. */
2329*4882a593Smuzhiyun 				break;
2330*4882a593Smuzhiyun 
2331*4882a593Smuzhiyun 			} else {
2332*4882a593Smuzhiyun 				/* There was an error. */
2333*4882a593Smuzhiyun 				dev->stats.rx_errors++;
2334*4882a593Smuzhiyun 				if (desc_status & (DescRxAbort|DescRxOver))
2335*4882a593Smuzhiyun 					dev->stats.rx_over_errors++;
2336*4882a593Smuzhiyun 				if (desc_status & (DescRxLong|DescRxRunt))
2337*4882a593Smuzhiyun 					dev->stats.rx_length_errors++;
2338*4882a593Smuzhiyun 				if (desc_status & (DescRxInvalid|DescRxAlign))
2339*4882a593Smuzhiyun 					dev->stats.rx_frame_errors++;
2340*4882a593Smuzhiyun 				if (desc_status & DescRxCRC)
2341*4882a593Smuzhiyun 					dev->stats.rx_crc_errors++;
2342*4882a593Smuzhiyun 			}
2343*4882a593Smuzhiyun 		} else if (pkt_len > np->rx_buf_sz) {
2344*4882a593Smuzhiyun 			/* if this is the tail of a double buffer
2345*4882a593Smuzhiyun 			 * packet, we've already counted the error
2346*4882a593Smuzhiyun 			 * on the first part.  Ignore the second half.
2347*4882a593Smuzhiyun 			 */
2348*4882a593Smuzhiyun 		} else {
2349*4882a593Smuzhiyun 			struct sk_buff *skb;
2350*4882a593Smuzhiyun 			/* Omit CRC size. */
2351*4882a593Smuzhiyun 			/* Check if the packet is long enough to accept
2352*4882a593Smuzhiyun 			 * without copying to a minimally-sized skbuff. */
2353*4882a593Smuzhiyun 			if (pkt_len < rx_copybreak &&
2354*4882a593Smuzhiyun 			    (skb = netdev_alloc_skb(dev, pkt_len + RX_OFFSET)) != NULL) {
2355*4882a593Smuzhiyun 				/* 16 byte align the IP header */
2356*4882a593Smuzhiyun 				skb_reserve(skb, RX_OFFSET);
2357*4882a593Smuzhiyun 				dma_sync_single_for_cpu(&np->pci_dev->dev,
2358*4882a593Smuzhiyun 							np->rx_dma[entry],
2359*4882a593Smuzhiyun 							buflen,
2360*4882a593Smuzhiyun 							DMA_FROM_DEVICE);
2361*4882a593Smuzhiyun 				skb_copy_to_linear_data(skb,
2362*4882a593Smuzhiyun 					np->rx_skbuff[entry]->data, pkt_len);
2363*4882a593Smuzhiyun 				skb_put(skb, pkt_len);
2364*4882a593Smuzhiyun 				dma_sync_single_for_device(&np->pci_dev->dev,
2365*4882a593Smuzhiyun 							   np->rx_dma[entry],
2366*4882a593Smuzhiyun 							   buflen,
2367*4882a593Smuzhiyun 							   DMA_FROM_DEVICE);
2368*4882a593Smuzhiyun 			} else {
2369*4882a593Smuzhiyun 				dma_unmap_single(&np->pci_dev->dev,
2370*4882a593Smuzhiyun 						 np->rx_dma[entry],
2371*4882a593Smuzhiyun 						 buflen + NATSEMI_PADDING,
2372*4882a593Smuzhiyun 						 DMA_FROM_DEVICE);
2373*4882a593Smuzhiyun 				skb_put(skb = np->rx_skbuff[entry], pkt_len);
2374*4882a593Smuzhiyun 				np->rx_skbuff[entry] = NULL;
2375*4882a593Smuzhiyun 			}
2376*4882a593Smuzhiyun 			skb->protocol = eth_type_trans(skb, dev);
2377*4882a593Smuzhiyun 			netif_receive_skb(skb);
2378*4882a593Smuzhiyun 			dev->stats.rx_packets++;
2379*4882a593Smuzhiyun 			dev->stats.rx_bytes += pkt_len;
2380*4882a593Smuzhiyun 		}
2381*4882a593Smuzhiyun 		entry = (++np->cur_rx) % RX_RING_SIZE;
2382*4882a593Smuzhiyun 		np->rx_head_desc = &np->rx_ring[entry];
2383*4882a593Smuzhiyun 		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
2384*4882a593Smuzhiyun 	}
2385*4882a593Smuzhiyun 	refill_rx(dev);
2386*4882a593Smuzhiyun 
2387*4882a593Smuzhiyun 	/* Restart the Rx engine if it stopped; on oom, retry the refill from the timer instead. */
2388*4882a593Smuzhiyun 	if (np->oom)
2389*4882a593Smuzhiyun 		mod_timer(&np->timer, jiffies + 1);
2390*4882a593Smuzhiyun 	else
2391*4882a593Smuzhiyun 		writel(RxOn, ioaddr + ChipCmd);
2392*4882a593Smuzhiyun }
2393*4882a593Smuzhiyun 
2394*4882a593Smuzhiyun static void netdev_error(struct net_device *dev, int intr_status)
2395*4882a593Smuzhiyun {
2396*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2397*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
2398*4882a593Smuzhiyun 
2399*4882a593Smuzhiyun 	spin_lock(&np->lock);
2400*4882a593Smuzhiyun 	if (intr_status & LinkChange) {
2401*4882a593Smuzhiyun 		u16 lpa = mdio_read(dev, MII_LPA);
2402*4882a593Smuzhiyun 		if (mdio_read(dev, MII_BMCR) & BMCR_ANENABLE &&
2403*4882a593Smuzhiyun 		    netif_msg_link(np)) {
2404*4882a593Smuzhiyun 			printk(KERN_INFO
2405*4882a593Smuzhiyun 				"%s: Autonegotiation advertising"
2406*4882a593Smuzhiyun 				" %#04x  partner %#04x.\n", dev->name,
2407*4882a593Smuzhiyun 				np->advertising, lpa);
2408*4882a593Smuzhiyun 		}
2409*4882a593Smuzhiyun 
2410*4882a593Smuzhiyun 		/* read MII int status to clear the flag */
2411*4882a593Smuzhiyun 		readw(ioaddr + MIntrStatus);
2412*4882a593Smuzhiyun 		check_link(dev);
2413*4882a593Smuzhiyun 	}
2414*4882a593Smuzhiyun 	if (intr_status & StatsMax) {
2415*4882a593Smuzhiyun 		__get_stats(dev);
2416*4882a593Smuzhiyun 	}
2417*4882a593Smuzhiyun 	if (intr_status & IntrTxUnderrun) {
2418*4882a593Smuzhiyun 		if ((np->tx_config & TxDrthMask) < TX_DRTH_VAL_LIMIT) {
2419*4882a593Smuzhiyun 			np->tx_config += TX_DRTH_VAL_INC;
2420*4882a593Smuzhiyun 			if (netif_msg_tx_err(np))
2421*4882a593Smuzhiyun 				printk(KERN_NOTICE
2422*4882a593Smuzhiyun 					"%s: increased tx threshold, txcfg %#08x.\n",
2423*4882a593Smuzhiyun 					dev->name, np->tx_config);
2424*4882a593Smuzhiyun 		} else {
2425*4882a593Smuzhiyun 			if (netif_msg_tx_err(np))
2426*4882a593Smuzhiyun 				printk(KERN_NOTICE
2427*4882a593Smuzhiyun 					"%s: tx underrun with maximum tx threshold, txcfg %#08x.\n",
2428*4882a593Smuzhiyun 					dev->name, np->tx_config);
2429*4882a593Smuzhiyun 		}
2430*4882a593Smuzhiyun 		writel(np->tx_config, ioaddr + TxConfig);
2431*4882a593Smuzhiyun 	}
2432*4882a593Smuzhiyun 	if (intr_status & WOLPkt && netif_msg_wol(np)) {
2433*4882a593Smuzhiyun 		int wol_status = readl(ioaddr + WOLCmd);
2434*4882a593Smuzhiyun 		printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
2435*4882a593Smuzhiyun 			dev->name, wol_status);
2436*4882a593Smuzhiyun 	}
2437*4882a593Smuzhiyun 	if (intr_status & RxStatusFIFOOver) {
2438*4882a593Smuzhiyun 		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
2439*4882a593Smuzhiyun 			printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
2440*4882a593Smuzhiyun 				dev->name);
2441*4882a593Smuzhiyun 		}
2442*4882a593Smuzhiyun 		dev->stats.rx_fifo_errors++;
2443*4882a593Smuzhiyun 		dev->stats.rx_errors++;
2444*4882a593Smuzhiyun 	}
2445*4882a593Smuzhiyun 	/* Hmmmmm, it's not clear how to recover from PCI faults. */
2446*4882a593Smuzhiyun 	if (intr_status & IntrPCIErr) {
2447*4882a593Smuzhiyun 		printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
2448*4882a593Smuzhiyun 			intr_status & IntrPCIErr);
2449*4882a593Smuzhiyun 		dev->stats.tx_fifo_errors++;
2450*4882a593Smuzhiyun 		dev->stats.tx_errors++;
2451*4882a593Smuzhiyun 		dev->stats.rx_fifo_errors++;
2452*4882a593Smuzhiyun 		dev->stats.rx_errors++;
2453*4882a593Smuzhiyun 	}
2454*4882a593Smuzhiyun 	spin_unlock(&np->lock);
2455*4882a593Smuzhiyun }
2456*4882a593Smuzhiyun 
2457*4882a593Smuzhiyun static void __get_stats(struct net_device *dev)
2458*4882a593Smuzhiyun {
2459*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
2460*4882a593Smuzhiyun 
2461*4882a593Smuzhiyun 	/* The chip only needs to report frames it silently dropped. */
2462*4882a593Smuzhiyun 	dev->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
2463*4882a593Smuzhiyun 	dev->stats.rx_missed_errors += readl(ioaddr + RxMissed);
2464*4882a593Smuzhiyun }
2465*4882a593Smuzhiyun 
2466*4882a593Smuzhiyun static struct net_device_stats *get_stats(struct net_device *dev)
2467*4882a593Smuzhiyun {
2468*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2469*4882a593Smuzhiyun 
2470*4882a593Smuzhiyun 	/* The chip only needs to report frames it silently dropped. */
2471*4882a593Smuzhiyun 	spin_lock_irq(&np->lock);
2472*4882a593Smuzhiyun 	if (netif_running(dev) && !np->hands_off)
2473*4882a593Smuzhiyun 		__get_stats(dev);
2474*4882a593Smuzhiyun 	spin_unlock_irq(&np->lock);
2475*4882a593Smuzhiyun 
2476*4882a593Smuzhiyun 	return &dev->stats;
2477*4882a593Smuzhiyun }
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun #ifdef CONFIG_NET_POLL_CONTROLLER
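/* Used by netpoll clients (e.g. netconsole) to pump the device with IRQs
 * disabled; it simply runs intr_handler() manually. */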
2480*4882a593Smuzhiyun static void natsemi_poll_controller(struct net_device *dev)
2481*4882a593Smuzhiyun {
2482*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2483*4882a593Smuzhiyun 	const int irq = np->pci_dev->irq;
2484*4882a593Smuzhiyun 
2485*4882a593Smuzhiyun 	disable_irq(irq);
2486*4882a593Smuzhiyun 	intr_handler(irq, dev);
2487*4882a593Smuzhiyun 	enable_irq(irq);
2488*4882a593Smuzhiyun }
2489*4882a593Smuzhiyun #endif
2490*4882a593Smuzhiyun 
2491*4882a593Smuzhiyun #define HASH_TABLE	0x200
2492*4882a593Smuzhiyun static void __set_rx_mode(struct net_device *dev)
2493*4882a593Smuzhiyun {
2494*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
2495*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2496*4882a593Smuzhiyun 	u8 mc_filter[64]; /* Multicast hash filter */
2497*4882a593Smuzhiyun 	u32 rx_mode;
2498*4882a593Smuzhiyun 
2499*4882a593Smuzhiyun 	if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2500*4882a593Smuzhiyun 		rx_mode = RxFilterEnable | AcceptBroadcast
2501*4882a593Smuzhiyun 			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
2502*4882a593Smuzhiyun 	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2503*4882a593Smuzhiyun 		   (dev->flags & IFF_ALLMULTI)) {
2504*4882a593Smuzhiyun 		rx_mode = RxFilterEnable | AcceptBroadcast
2505*4882a593Smuzhiyun 			| AcceptAllMulticast | AcceptMyPhys;
2506*4882a593Smuzhiyun 	} else {
2507*4882a593Smuzhiyun 		struct netdev_hw_addr *ha;
2508*4882a593Smuzhiyun 		int i;
2509*4882a593Smuzhiyun 
2510*4882a593Smuzhiyun 		memset(mc_filter, 0, sizeof(mc_filter));
2511*4882a593Smuzhiyun 		netdev_for_each_mc_addr(ha, dev) {
2512*4882a593Smuzhiyun 			int b = (ether_crc(ETH_ALEN, ha->addr) >> 23) & 0x1ff;
2513*4882a593Smuzhiyun 			mc_filter[b/8] |= (1 << (b & 0x07));
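			/* Bits 31..23 of the CRC pick one of 512 hash bits;
			 * bit b lives at bit (b & 7) of byte mc_filter[b/8]. */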
2514*4882a593Smuzhiyun 		}
2515*4882a593Smuzhiyun 		rx_mode = RxFilterEnable | AcceptBroadcast
2516*4882a593Smuzhiyun 			| AcceptMulticast | AcceptMyPhys;
2517*4882a593Smuzhiyun 		for (i = 0; i < 64; i += 2) {
2518*4882a593Smuzhiyun 			writel(HASH_TABLE + i, ioaddr + RxFilterAddr);
2519*4882a593Smuzhiyun 			writel((mc_filter[i + 1] << 8) + mc_filter[i],
2520*4882a593Smuzhiyun 			       ioaddr + RxFilterData);
2521*4882a593Smuzhiyun 		}
2522*4882a593Smuzhiyun 	}
2523*4882a593Smuzhiyun 	writel(rx_mode, ioaddr + RxFilterAddr);
2524*4882a593Smuzhiyun 	np->cur_rx_mode = rx_mode;
2525*4882a593Smuzhiyun }
2526*4882a593Smuzhiyun 
2527*4882a593Smuzhiyun static int natsemi_change_mtu(struct net_device *dev, int new_mtu)
2528*4882a593Smuzhiyun {
2529*4882a593Smuzhiyun 	dev->mtu = new_mtu;
2530*4882a593Smuzhiyun 
2531*4882a593Smuzhiyun 	/* synchronized against open : rtnl_lock() held by caller */
2532*4882a593Smuzhiyun 	if (netif_running(dev)) {
2533*4882a593Smuzhiyun 		struct netdev_private *np = netdev_priv(dev);
2534*4882a593Smuzhiyun 		void __iomem * ioaddr = ns_ioaddr(dev);
2535*4882a593Smuzhiyun 		const int irq = np->pci_dev->irq;
2536*4882a593Smuzhiyun 
2537*4882a593Smuzhiyun 		disable_irq(irq);
2538*4882a593Smuzhiyun 		spin_lock(&np->lock);
2539*4882a593Smuzhiyun 		/* stop engines */
2540*4882a593Smuzhiyun 		natsemi_stop_rxtx(dev);
2541*4882a593Smuzhiyun 		/* drain rx queue */
2542*4882a593Smuzhiyun 		drain_rx(dev);
2543*4882a593Smuzhiyun 		/* change buffers */
2544*4882a593Smuzhiyun 		set_bufsize(dev);
2545*4882a593Smuzhiyun 		reinit_rx(dev);
2546*4882a593Smuzhiyun 		writel(np->ring_dma, ioaddr + RxRingPtr);
2547*4882a593Smuzhiyun 		/* restart engines */
2548*4882a593Smuzhiyun 		writel(RxOn | TxOn, ioaddr + ChipCmd);
2549*4882a593Smuzhiyun 		spin_unlock(&np->lock);
2550*4882a593Smuzhiyun 		enable_irq(irq);
2551*4882a593Smuzhiyun 	}
2552*4882a593Smuzhiyun 	return 0;
2553*4882a593Smuzhiyun }
2554*4882a593Smuzhiyun 
2555*4882a593Smuzhiyun static void set_rx_mode(struct net_device *dev)
2556*4882a593Smuzhiyun {
2557*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2558*4882a593Smuzhiyun 	spin_lock_irq(&np->lock);
2559*4882a593Smuzhiyun 	if (!np->hands_off)
2560*4882a593Smuzhiyun 		__set_rx_mode(dev);
2561*4882a593Smuzhiyun 	spin_unlock_irq(&np->lock);
2562*4882a593Smuzhiyun }
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2565*4882a593Smuzhiyun {
2566*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2567*4882a593Smuzhiyun 	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2568*4882a593Smuzhiyun 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2569*4882a593Smuzhiyun 	strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info));
2570*4882a593Smuzhiyun }
2571*4882a593Smuzhiyun 
2572*4882a593Smuzhiyun static int get_regs_len(struct net_device *dev)
2573*4882a593Smuzhiyun {
2574*4882a593Smuzhiyun 	return NATSEMI_REGS_SIZE;
2575*4882a593Smuzhiyun }
2576*4882a593Smuzhiyun 
2577*4882a593Smuzhiyun static int get_eeprom_len(struct net_device *dev)
2578*4882a593Smuzhiyun {
2579*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2580*4882a593Smuzhiyun 	return np->eeprom_size;
2581*4882a593Smuzhiyun }
2582*4882a593Smuzhiyun 
2583*4882a593Smuzhiyun static int get_link_ksettings(struct net_device *dev,
2584*4882a593Smuzhiyun 			      struct ethtool_link_ksettings *ecmd)
2585*4882a593Smuzhiyun {
2586*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2587*4882a593Smuzhiyun 	spin_lock_irq(&np->lock);
2588*4882a593Smuzhiyun 	netdev_get_ecmd(dev, ecmd);
2589*4882a593Smuzhiyun 	spin_unlock_irq(&np->lock);
2590*4882a593Smuzhiyun 	return 0;
2591*4882a593Smuzhiyun }
2592*4882a593Smuzhiyun 
2593*4882a593Smuzhiyun static int set_link_ksettings(struct net_device *dev,
2594*4882a593Smuzhiyun 			      const struct ethtool_link_ksettings *ecmd)
2595*4882a593Smuzhiyun {
2596*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2597*4882a593Smuzhiyun 	int res;
2598*4882a593Smuzhiyun 	spin_lock_irq(&np->lock);
2599*4882a593Smuzhiyun 	res = netdev_set_ecmd(dev, ecmd);
2600*4882a593Smuzhiyun 	spin_unlock_irq(&np->lock);
2601*4882a593Smuzhiyun 	return res;
2602*4882a593Smuzhiyun }
2603*4882a593Smuzhiyun 
2604*4882a593Smuzhiyun static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2605*4882a593Smuzhiyun {
2606*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2607*4882a593Smuzhiyun 	spin_lock_irq(&np->lock);
2608*4882a593Smuzhiyun 	netdev_get_wol(dev, &wol->supported, &wol->wolopts);
2609*4882a593Smuzhiyun 	netdev_get_sopass(dev, wol->sopass);
2610*4882a593Smuzhiyun 	spin_unlock_irq(&np->lock);
2611*4882a593Smuzhiyun }
2612*4882a593Smuzhiyun 
2613*4882a593Smuzhiyun static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2614*4882a593Smuzhiyun {
2615*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2616*4882a593Smuzhiyun 	int res;
2617*4882a593Smuzhiyun 	spin_lock_irq(&np->lock);
2618*4882a593Smuzhiyun 	netdev_set_wol(dev, wol->wolopts);
2619*4882a593Smuzhiyun 	res = netdev_set_sopass(dev, wol->sopass);
2620*4882a593Smuzhiyun 	spin_unlock_irq(&np->lock);
2621*4882a593Smuzhiyun 	return res;
2622*4882a593Smuzhiyun }
2623*4882a593Smuzhiyun 
2624*4882a593Smuzhiyun static void get_regs(struct net_device *dev, struct ethtool_regs *regs, void *buf)
2625*4882a593Smuzhiyun {
2626*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2627*4882a593Smuzhiyun 	regs->version = NATSEMI_REGS_VER;
2628*4882a593Smuzhiyun 	spin_lock_irq(&np->lock);
2629*4882a593Smuzhiyun 	netdev_get_regs(dev, buf);
2630*4882a593Smuzhiyun 	spin_unlock_irq(&np->lock);
2631*4882a593Smuzhiyun }
2632*4882a593Smuzhiyun 
2633*4882a593Smuzhiyun static u32 get_msglevel(struct net_device *dev)
2634*4882a593Smuzhiyun {
2635*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2636*4882a593Smuzhiyun 	return np->msg_enable;
2637*4882a593Smuzhiyun }
2638*4882a593Smuzhiyun 
2639*4882a593Smuzhiyun static void set_msglevel(struct net_device *dev, u32 val)
2640*4882a593Smuzhiyun {
2641*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2642*4882a593Smuzhiyun 	np->msg_enable = val;
2643*4882a593Smuzhiyun }
2644*4882a593Smuzhiyun 
2645*4882a593Smuzhiyun static int nway_reset(struct net_device *dev)
2646*4882a593Smuzhiyun {
2647*4882a593Smuzhiyun 	int tmp;
2648*4882a593Smuzhiyun 	int r = -EINVAL;
2649*4882a593Smuzhiyun 	/* if autoneg is off, it's an error */
2650*4882a593Smuzhiyun 	tmp = mdio_read(dev, MII_BMCR);
2651*4882a593Smuzhiyun 	if (tmp & BMCR_ANENABLE) {
2652*4882a593Smuzhiyun 		tmp |= (BMCR_ANRESTART);
2653*4882a593Smuzhiyun 		mdio_write(dev, MII_BMCR, tmp);
2654*4882a593Smuzhiyun 		r = 0;
2655*4882a593Smuzhiyun 	}
2656*4882a593Smuzhiyun 	return r;
2657*4882a593Smuzhiyun }
2658*4882a593Smuzhiyun 
2659*4882a593Smuzhiyun static u32 get_link(struct net_device *dev)
2660*4882a593Smuzhiyun {
2661*4882a593Smuzhiyun 	/* LSTATUS is latched low until a read - so read twice */
2662*4882a593Smuzhiyun 	mdio_read(dev, MII_BMSR);
2663*4882a593Smuzhiyun 	return (mdio_read(dev, MII_BMSR) & BMSR_LSTATUS) ? 1 : 0;
2664*4882a593Smuzhiyun }
2665*4882a593Smuzhiyun 
2666*4882a593Smuzhiyun static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
2667*4882a593Smuzhiyun {
2668*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2669*4882a593Smuzhiyun 	u8 *eebuf;
2670*4882a593Smuzhiyun 	int res;
2671*4882a593Smuzhiyun 
2672*4882a593Smuzhiyun 	eebuf = kmalloc(np->eeprom_size, GFP_KERNEL);
2673*4882a593Smuzhiyun 	if (!eebuf)
2674*4882a593Smuzhiyun 		return -ENOMEM;
2675*4882a593Smuzhiyun 
2676*4882a593Smuzhiyun 	eeprom->magic = PCI_VENDOR_ID_NS | (PCI_DEVICE_ID_NS_83815<<16);
2677*4882a593Smuzhiyun 	spin_lock_irq(&np->lock);
2678*4882a593Smuzhiyun 	res = netdev_get_eeprom(dev, eebuf);
2679*4882a593Smuzhiyun 	spin_unlock_irq(&np->lock);
2680*4882a593Smuzhiyun 	if (!res)
2681*4882a593Smuzhiyun 		memcpy(data, eebuf+eeprom->offset, eeprom->len);
2682*4882a593Smuzhiyun 	kfree(eebuf);
2683*4882a593Smuzhiyun 	return res;
2684*4882a593Smuzhiyun }
2685*4882a593Smuzhiyun 
2686*4882a593Smuzhiyun static const struct ethtool_ops ethtool_ops = {
2687*4882a593Smuzhiyun 	.get_drvinfo = get_drvinfo,
2688*4882a593Smuzhiyun 	.get_regs_len = get_regs_len,
2689*4882a593Smuzhiyun 	.get_eeprom_len = get_eeprom_len,
2690*4882a593Smuzhiyun 	.get_wol = get_wol,
2691*4882a593Smuzhiyun 	.set_wol = set_wol,
2692*4882a593Smuzhiyun 	.get_regs = get_regs,
2693*4882a593Smuzhiyun 	.get_msglevel = get_msglevel,
2694*4882a593Smuzhiyun 	.set_msglevel = set_msglevel,
2695*4882a593Smuzhiyun 	.nway_reset = nway_reset,
2696*4882a593Smuzhiyun 	.get_link = get_link,
2697*4882a593Smuzhiyun 	.get_eeprom = get_eeprom,
2698*4882a593Smuzhiyun 	.get_link_ksettings = get_link_ksettings,
2699*4882a593Smuzhiyun 	.set_link_ksettings = set_link_ksettings,
2700*4882a593Smuzhiyun };
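
/* These ops back the usual ethtool commands; roughly (not exhaustive):
 * "ethtool ethX" -> get_link_ksettings, "ethtool -r" -> nway_reset,
 * "ethtool -d" -> get_regs, "ethtool -e" -> get_eeprom, and
 * "ethtool -s ethX wol ..." -> set_wol. */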
2701*4882a593Smuzhiyun 
2702*4882a593Smuzhiyun static int netdev_set_wol(struct net_device *dev, u32 newval)
2703*4882a593Smuzhiyun {
2704*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2705*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
2706*4882a593Smuzhiyun 	u32 data = readl(ioaddr + WOLCmd) & ~WakeOptsSummary;
2707*4882a593Smuzhiyun 
2708*4882a593Smuzhiyun 	/* translate to bitmasks this chip understands */
2709*4882a593Smuzhiyun 	if (newval & WAKE_PHY)
2710*4882a593Smuzhiyun 		data |= WakePhy;
2711*4882a593Smuzhiyun 	if (newval & WAKE_UCAST)
2712*4882a593Smuzhiyun 		data |= WakeUnicast;
2713*4882a593Smuzhiyun 	if (newval & WAKE_MCAST)
2714*4882a593Smuzhiyun 		data |= WakeMulticast;
2715*4882a593Smuzhiyun 	if (newval & WAKE_BCAST)
2716*4882a593Smuzhiyun 		data |= WakeBroadcast;
2717*4882a593Smuzhiyun 	if (newval & WAKE_ARP)
2718*4882a593Smuzhiyun 		data |= WakeArp;
2719*4882a593Smuzhiyun 	if (newval & WAKE_MAGIC)
2720*4882a593Smuzhiyun 		data |= WakeMagic;
2721*4882a593Smuzhiyun 	if (np->srr >= SRR_DP83815_D) {
2722*4882a593Smuzhiyun 		if (newval & WAKE_MAGICSECURE) {
2723*4882a593Smuzhiyun 			data |= WakeMagicSecure;
2724*4882a593Smuzhiyun 		}
2725*4882a593Smuzhiyun 	}
2726*4882a593Smuzhiyun 
2727*4882a593Smuzhiyun 	writel(data, ioaddr + WOLCmd);
2728*4882a593Smuzhiyun 
2729*4882a593Smuzhiyun 	return 0;
2730*4882a593Smuzhiyun }
2731*4882a593Smuzhiyun 
2732*4882a593Smuzhiyun static int netdev_get_wol(struct net_device *dev, u32 *supported, u32 *cur)
2733*4882a593Smuzhiyun {
2734*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2735*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
2736*4882a593Smuzhiyun 	u32 regval = readl(ioaddr + WOLCmd);
2737*4882a593Smuzhiyun 
2738*4882a593Smuzhiyun 	*supported = (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST
2739*4882a593Smuzhiyun 			| WAKE_ARP | WAKE_MAGIC);
2740*4882a593Smuzhiyun 
2741*4882a593Smuzhiyun 	if (np->srr >= SRR_DP83815_D) {
2742*4882a593Smuzhiyun 		/* SOPASS works on revD and higher */
2743*4882a593Smuzhiyun 		*supported |= WAKE_MAGICSECURE;
2744*4882a593Smuzhiyun 	}
2745*4882a593Smuzhiyun 	*cur = 0;
2746*4882a593Smuzhiyun 
2747*4882a593Smuzhiyun 	/* translate from chip bitmasks */
2748*4882a593Smuzhiyun 	if (regval & WakePhy)
2749*4882a593Smuzhiyun 		*cur |= WAKE_PHY;
2750*4882a593Smuzhiyun 	if (regval & WakeUnicast)
2751*4882a593Smuzhiyun 		*cur |= WAKE_UCAST;
2752*4882a593Smuzhiyun 	if (regval & WakeMulticast)
2753*4882a593Smuzhiyun 		*cur |= WAKE_MCAST;
2754*4882a593Smuzhiyun 	if (regval & WakeBroadcast)
2755*4882a593Smuzhiyun 		*cur |= WAKE_BCAST;
2756*4882a593Smuzhiyun 	if (regval & WakeArp)
2757*4882a593Smuzhiyun 		*cur |= WAKE_ARP;
2758*4882a593Smuzhiyun 	if (regval & WakeMagic)
2759*4882a593Smuzhiyun 		*cur |= WAKE_MAGIC;
2760*4882a593Smuzhiyun 	if (regval & WakeMagicSecure) {
2761*4882a593Smuzhiyun 		/* this can be on in revC, but it's broken */
2762*4882a593Smuzhiyun 		*cur |= WAKE_MAGICSECURE;
2763*4882a593Smuzhiyun 	}
2764*4882a593Smuzhiyun 
2765*4882a593Smuzhiyun 	return 0;
2766*4882a593Smuzhiyun }
2767*4882a593Smuzhiyun 
2768*4882a593Smuzhiyun static int netdev_set_sopass(struct net_device *dev, u8 *newval)
2769*4882a593Smuzhiyun {
2770*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2771*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
2772*4882a593Smuzhiyun 	u16 *sval = (u16 *)newval;
2773*4882a593Smuzhiyun 	u32 addr;
2774*4882a593Smuzhiyun 
2775*4882a593Smuzhiyun 	if (np->srr < SRR_DP83815_D) {
2776*4882a593Smuzhiyun 		return 0;
2777*4882a593Smuzhiyun 	}
2778*4882a593Smuzhiyun 
2779*4882a593Smuzhiyun 	/* enable writing to these registers by disabling the RX filter */
2780*4882a593Smuzhiyun 	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
2781*4882a593Smuzhiyun 	addr &= ~RxFilterEnable;
2782*4882a593Smuzhiyun 	writel(addr, ioaddr + RxFilterAddr);
2783*4882a593Smuzhiyun 
2784*4882a593Smuzhiyun 	/* write the three words to (undocumented) RFCR vals 0xa, 0xc, 0xe */
2785*4882a593Smuzhiyun 	writel(addr | 0xa, ioaddr + RxFilterAddr);
2786*4882a593Smuzhiyun 	writew(sval[0], ioaddr + RxFilterData);
2787*4882a593Smuzhiyun 
2788*4882a593Smuzhiyun 	writel(addr | 0xc, ioaddr + RxFilterAddr);
2789*4882a593Smuzhiyun 	writew(sval[1], ioaddr + RxFilterData);
2790*4882a593Smuzhiyun 
2791*4882a593Smuzhiyun 	writel(addr | 0xe, ioaddr + RxFilterAddr);
2792*4882a593Smuzhiyun 	writew(sval[2], ioaddr + RxFilterData);
2793*4882a593Smuzhiyun 
2794*4882a593Smuzhiyun 	/* re-enable the RX filter */
2795*4882a593Smuzhiyun 	writel(addr | RxFilterEnable, ioaddr + RxFilterAddr);
2796*4882a593Smuzhiyun 
2797*4882a593Smuzhiyun 	return 0;
2798*4882a593Smuzhiyun }
2799*4882a593Smuzhiyun 
2800*4882a593Smuzhiyun static int netdev_get_sopass(struct net_device *dev, u8 *data)
2801*4882a593Smuzhiyun {
2802*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2803*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
2804*4882a593Smuzhiyun 	u16 *sval = (u16 *)data;
2805*4882a593Smuzhiyun 	u32 addr;
2806*4882a593Smuzhiyun 
2807*4882a593Smuzhiyun 	if (np->srr < SRR_DP83815_D) {
2808*4882a593Smuzhiyun 		sval[0] = sval[1] = sval[2] = 0;
2809*4882a593Smuzhiyun 		return 0;
2810*4882a593Smuzhiyun 	}
2811*4882a593Smuzhiyun 
2812*4882a593Smuzhiyun 	/* read the three words from (undocumented) RFCR vals 0xa, 0xc, 0xe */
2813*4882a593Smuzhiyun 	addr = readl(ioaddr + RxFilterAddr) & ~RFCRAddressMask;
2814*4882a593Smuzhiyun 
2815*4882a593Smuzhiyun 	writel(addr | 0xa, ioaddr + RxFilterAddr);
2816*4882a593Smuzhiyun 	sval[0] = readw(ioaddr + RxFilterData);
2817*4882a593Smuzhiyun 
2818*4882a593Smuzhiyun 	writel(addr | 0xc, ioaddr + RxFilterAddr);
2819*4882a593Smuzhiyun 	sval[1] = readw(ioaddr + RxFilterData);
2820*4882a593Smuzhiyun 
2821*4882a593Smuzhiyun 	writel(addr | 0xe, ioaddr + RxFilterAddr);
2822*4882a593Smuzhiyun 	sval[2] = readw(ioaddr + RxFilterData);
2823*4882a593Smuzhiyun 
2824*4882a593Smuzhiyun 	writel(addr, ioaddr + RxFilterAddr);
2825*4882a593Smuzhiyun 
2826*4882a593Smuzhiyun 	return 0;
2827*4882a593Smuzhiyun }
2828*4882a593Smuzhiyun 
2829*4882a593Smuzhiyun static int netdev_get_ecmd(struct net_device *dev,
2830*4882a593Smuzhiyun 			   struct ethtool_link_ksettings *ecmd)
2831*4882a593Smuzhiyun {
2832*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2833*4882a593Smuzhiyun 	u32 supported, advertising;
2834*4882a593Smuzhiyun 	u32 tmp;
2835*4882a593Smuzhiyun 
2836*4882a593Smuzhiyun 	ecmd->base.port   = dev->if_port;
2837*4882a593Smuzhiyun 	ecmd->base.speed  = np->speed;
2838*4882a593Smuzhiyun 	ecmd->base.duplex = np->duplex;
2839*4882a593Smuzhiyun 	ecmd->base.autoneg = np->autoneg;
2840*4882a593Smuzhiyun 	advertising = 0;
2841*4882a593Smuzhiyun 
2842*4882a593Smuzhiyun 	if (np->advertising & ADVERTISE_10HALF)
2843*4882a593Smuzhiyun 		advertising |= ADVERTISED_10baseT_Half;
2844*4882a593Smuzhiyun 	if (np->advertising & ADVERTISE_10FULL)
2845*4882a593Smuzhiyun 		advertising |= ADVERTISED_10baseT_Full;
2846*4882a593Smuzhiyun 	if (np->advertising & ADVERTISE_100HALF)
2847*4882a593Smuzhiyun 		advertising |= ADVERTISED_100baseT_Half;
2848*4882a593Smuzhiyun 	if (np->advertising & ADVERTISE_100FULL)
2849*4882a593Smuzhiyun 		advertising |= ADVERTISED_100baseT_Full;
2850*4882a593Smuzhiyun 	supported   = (SUPPORTED_Autoneg |
2851*4882a593Smuzhiyun 		SUPPORTED_10baseT_Half  | SUPPORTED_10baseT_Full  |
2852*4882a593Smuzhiyun 		SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
2853*4882a593Smuzhiyun 		SUPPORTED_TP | SUPPORTED_MII | SUPPORTED_FIBRE);
2854*4882a593Smuzhiyun 	ecmd->base.phy_address = np->phy_addr_external;
2855*4882a593Smuzhiyun 	/*
2856*4882a593Smuzhiyun 	 * We intentionally report the phy address of the external
2857*4882a593Smuzhiyun 	 * phy, even if the internal phy is used. This is necessary
2858*4882a593Smuzhiyun 	 * to work around a deficiency of the ethtool interface:
2859*4882a593Smuzhiyun 	 * It's only possible to query the settings of the active
2860*4882a593Smuzhiyun 	 * port. Therefore
2861*4882a593Smuzhiyun 	 * # ethtool -s ethX port mii
2862*4882a593Smuzhiyun 	 * actually sends an ioctl to switch to port mii with the
2863*4882a593Smuzhiyun 	 * settings that are used for the current active port.
2864*4882a593Smuzhiyun 	 * If we reported a different phy address in this
2865*4882a593Smuzhiyun 	 * command, then
2866*4882a593Smuzhiyun 	 * # ethtool -s ethX port tp;ethtool -s ethX port mii
2867*4882a593Smuzhiyun 	 * would unintentionally change the phy address.
2868*4882a593Smuzhiyun 	 *
2869*4882a593Smuzhiyun 	 * Fortunately the phy address doesn't matter with the
2870*4882a593Smuzhiyun 	 * internal phy...
2871*4882a593Smuzhiyun 	 */
2872*4882a593Smuzhiyun 
2873*4882a593Smuzhiyun 	/* set information based on active port type */
2874*4882a593Smuzhiyun 	switch (ecmd->base.port) {
2875*4882a593Smuzhiyun 	default:
2876*4882a593Smuzhiyun 	case PORT_TP:
2877*4882a593Smuzhiyun 		advertising |= ADVERTISED_TP;
2878*4882a593Smuzhiyun 		break;
2879*4882a593Smuzhiyun 	case PORT_MII:
2880*4882a593Smuzhiyun 		advertising |= ADVERTISED_MII;
2881*4882a593Smuzhiyun 		break;
2882*4882a593Smuzhiyun 	case PORT_FIBRE:
2883*4882a593Smuzhiyun 		advertising |= ADVERTISED_FIBRE;
2884*4882a593Smuzhiyun 		break;
2885*4882a593Smuzhiyun 	}
2886*4882a593Smuzhiyun 
2887*4882a593Smuzhiyun 	/* if autonegotiation is on, try to return the active speed/duplex */
2888*4882a593Smuzhiyun 	if (ecmd->base.autoneg == AUTONEG_ENABLE) {
2889*4882a593Smuzhiyun 		advertising |= ADVERTISED_Autoneg;
2890*4882a593Smuzhiyun 		tmp = mii_nway_result(
2891*4882a593Smuzhiyun 			np->advertising & mdio_read(dev, MII_LPA));
2892*4882a593Smuzhiyun 		if (tmp == LPA_100FULL || tmp == LPA_100HALF)
2893*4882a593Smuzhiyun 			ecmd->base.speed = SPEED_100;
2894*4882a593Smuzhiyun 		else
2895*4882a593Smuzhiyun 			ecmd->base.speed = SPEED_10;
2896*4882a593Smuzhiyun 		if (tmp == LPA_100FULL || tmp == LPA_10FULL)
2897*4882a593Smuzhiyun 			ecmd->base.duplex = DUPLEX_FULL;
2898*4882a593Smuzhiyun 		else
2899*4882a593Smuzhiyun 			ecmd->base.duplex = DUPLEX_HALF;
2900*4882a593Smuzhiyun 	}
2901*4882a593Smuzhiyun 
2902*4882a593Smuzhiyun 	/* ignore maxtxpkt, maxrxpkt for now */
2903*4882a593Smuzhiyun 
2904*4882a593Smuzhiyun 	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.supported,
2905*4882a593Smuzhiyun 						supported);
2906*4882a593Smuzhiyun 	ethtool_convert_legacy_u32_to_link_mode(ecmd->link_modes.advertising,
2907*4882a593Smuzhiyun 						advertising);
2908*4882a593Smuzhiyun 
2909*4882a593Smuzhiyun 	return 0;
2910*4882a593Smuzhiyun }
2911*4882a593Smuzhiyun 
2912*4882a593Smuzhiyun static int netdev_set_ecmd(struct net_device *dev,
2913*4882a593Smuzhiyun 			   const struct ethtool_link_ksettings *ecmd)
2914*4882a593Smuzhiyun {
2915*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
2916*4882a593Smuzhiyun 	u32 advertising;
2917*4882a593Smuzhiyun 
2918*4882a593Smuzhiyun 	ethtool_convert_link_mode_to_legacy_u32(&advertising,
2919*4882a593Smuzhiyun 						ecmd->link_modes.advertising);
2920*4882a593Smuzhiyun 
2921*4882a593Smuzhiyun 	if (ecmd->base.port != PORT_TP &&
2922*4882a593Smuzhiyun 	    ecmd->base.port != PORT_MII &&
2923*4882a593Smuzhiyun 	    ecmd->base.port != PORT_FIBRE)
2924*4882a593Smuzhiyun 		return -EINVAL;
2925*4882a593Smuzhiyun 	if (ecmd->base.autoneg == AUTONEG_ENABLE) {
2926*4882a593Smuzhiyun 		if ((advertising & (ADVERTISED_10baseT_Half |
2927*4882a593Smuzhiyun 					  ADVERTISED_10baseT_Full |
2928*4882a593Smuzhiyun 					  ADVERTISED_100baseT_Half |
2929*4882a593Smuzhiyun 					  ADVERTISED_100baseT_Full)) == 0) {
2930*4882a593Smuzhiyun 			return -EINVAL;
2931*4882a593Smuzhiyun 		}
2932*4882a593Smuzhiyun 	} else if (ecmd->base.autoneg == AUTONEG_DISABLE) {
2933*4882a593Smuzhiyun 		u32 speed = ecmd->base.speed;
2934*4882a593Smuzhiyun 		if (speed != SPEED_10 && speed != SPEED_100)
2935*4882a593Smuzhiyun 			return -EINVAL;
2936*4882a593Smuzhiyun 		if (ecmd->base.duplex != DUPLEX_HALF &&
2937*4882a593Smuzhiyun 		    ecmd->base.duplex != DUPLEX_FULL)
2938*4882a593Smuzhiyun 			return -EINVAL;
2939*4882a593Smuzhiyun 	} else {
2940*4882a593Smuzhiyun 		return -EINVAL;
2941*4882a593Smuzhiyun 	}
2942*4882a593Smuzhiyun 
2943*4882a593Smuzhiyun 	/*
2944*4882a593Smuzhiyun 	 * If we're ignoring the PHY then autoneg and the internal
2945*4882a593Smuzhiyun 	 * transceiver are really not going to work so don't let the
2946*4882a593Smuzhiyun 	 * user select them.
2947*4882a593Smuzhiyun 	 */
2948*4882a593Smuzhiyun 	if (np->ignore_phy && (ecmd->base.autoneg == AUTONEG_ENABLE ||
2949*4882a593Smuzhiyun 			       ecmd->base.port == PORT_TP))
2950*4882a593Smuzhiyun 		return -EINVAL;
2951*4882a593Smuzhiyun 
2952*4882a593Smuzhiyun 	/*
2953*4882a593Smuzhiyun 	 * maxtxpkt, maxrxpkt: ignored for now.
2954*4882a593Smuzhiyun 	 *
2955*4882a593Smuzhiyun 	 * transceiver:
2956*4882a593Smuzhiyun 	 * PORT_TP is always XCVR_INTERNAL, PORT_MII and PORT_FIBRE are always
2957*4882a593Smuzhiyun 	 * XCVR_EXTERNAL. The implementation thus ignores ecmd->transceiver and
2958*4882a593Smuzhiyun 	 * selects based on ecmd->port.
2959*4882a593Smuzhiyun 	 *
2960*4882a593Smuzhiyun 	 * Actually PORT_FIBRE is nearly identical to PORT_MII: it's for fibre
2961*4882a593Smuzhiyun 	 * phys that are connected to the mii bus. It's used to apply fibre
2962*4882a593Smuzhiyun 	 * specific updates.
2963*4882a593Smuzhiyun 	 */
2964*4882a593Smuzhiyun 
2965*4882a593Smuzhiyun 	/* WHEW! now let's bang some bits */
2966*4882a593Smuzhiyun 
2967*4882a593Smuzhiyun 	/* save the parms */
2968*4882a593Smuzhiyun 	dev->if_port          = ecmd->base.port;
2969*4882a593Smuzhiyun 	np->autoneg           = ecmd->base.autoneg;
2970*4882a593Smuzhiyun 	np->phy_addr_external = ecmd->base.phy_address & PhyAddrMask;
2971*4882a593Smuzhiyun 	if (np->autoneg == AUTONEG_ENABLE) {
2972*4882a593Smuzhiyun 		/* advertise only what has been requested */
2973*4882a593Smuzhiyun 		np->advertising &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4);
2974*4882a593Smuzhiyun 		if (advertising & ADVERTISED_10baseT_Half)
2975*4882a593Smuzhiyun 			np->advertising |= ADVERTISE_10HALF;
2976*4882a593Smuzhiyun 		if (advertising & ADVERTISED_10baseT_Full)
2977*4882a593Smuzhiyun 			np->advertising |= ADVERTISE_10FULL;
2978*4882a593Smuzhiyun 		if (advertising & ADVERTISED_100baseT_Half)
2979*4882a593Smuzhiyun 			np->advertising |= ADVERTISE_100HALF;
2980*4882a593Smuzhiyun 		if (advertising & ADVERTISED_100baseT_Full)
2981*4882a593Smuzhiyun 			np->advertising |= ADVERTISE_100FULL;
2982*4882a593Smuzhiyun 	} else {
2983*4882a593Smuzhiyun 		np->speed  = ecmd->base.speed;
2984*4882a593Smuzhiyun 		np->duplex = ecmd->base.duplex;
2985*4882a593Smuzhiyun 		/* user overriding the initial full duplex parm? */
2986*4882a593Smuzhiyun 		if (np->duplex == DUPLEX_HALF)
2987*4882a593Smuzhiyun 			np->full_duplex = 0;
2988*4882a593Smuzhiyun 	}
2989*4882a593Smuzhiyun 
2990*4882a593Smuzhiyun 	/* get the right phy enabled */
2991*4882a593Smuzhiyun 	if (ecmd->base.port == PORT_TP)
2992*4882a593Smuzhiyun 		switch_port_internal(dev);
2993*4882a593Smuzhiyun 	else
2994*4882a593Smuzhiyun 		switch_port_external(dev);
2995*4882a593Smuzhiyun 
2996*4882a593Smuzhiyun 	/* set parms and see how this affected our link status */
2997*4882a593Smuzhiyun 	init_phy_fixup(dev);
2998*4882a593Smuzhiyun 	check_link(dev);
2999*4882a593Smuzhiyun 	return 0;
3000*4882a593Smuzhiyun }
3001*4882a593Smuzhiyun 
3002*4882a593Smuzhiyun static int netdev_get_regs(struct net_device *dev, u8 *buf)
3003*4882a593Smuzhiyun {
3004*4882a593Smuzhiyun 	int i;
3005*4882a593Smuzhiyun 	int j;
3006*4882a593Smuzhiyun 	u32 rfcr;
3007*4882a593Smuzhiyun 	u32 *rbuf = (u32 *)buf;
3008*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
3009*4882a593Smuzhiyun 
3010*4882a593Smuzhiyun 	/* read non-mii page 0 of registers */
3011*4882a593Smuzhiyun 	for (i = 0; i < NATSEMI_PG0_NREGS/2; i++) {
3012*4882a593Smuzhiyun 		rbuf[i] = readl(ioaddr + i*4);
3013*4882a593Smuzhiyun 	}
3014*4882a593Smuzhiyun 
3015*4882a593Smuzhiyun 	/* read current mii registers */
3016*4882a593Smuzhiyun 	for (i = NATSEMI_PG0_NREGS/2; i < NATSEMI_PG0_NREGS; i++)
3017*4882a593Smuzhiyun 		rbuf[i] = mdio_read(dev, i & 0x1f);
3018*4882a593Smuzhiyun 
3019*4882a593Smuzhiyun 	/* read only the 'magic' registers from page 1 */
3020*4882a593Smuzhiyun 	writew(1, ioaddr + PGSEL);
3021*4882a593Smuzhiyun 	rbuf[i++] = readw(ioaddr + PMDCSR);
3022*4882a593Smuzhiyun 	rbuf[i++] = readw(ioaddr + TSTDAT);
3023*4882a593Smuzhiyun 	rbuf[i++] = readw(ioaddr + DSPCFG);
3024*4882a593Smuzhiyun 	rbuf[i++] = readw(ioaddr + SDCFG);
3025*4882a593Smuzhiyun 	writew(0, ioaddr + PGSEL);
3026*4882a593Smuzhiyun 
3027*4882a593Smuzhiyun 	/* read RFCR indexed registers */
3028*4882a593Smuzhiyun 	rfcr = readl(ioaddr + RxFilterAddr);
3029*4882a593Smuzhiyun 	for (j = 0; j < NATSEMI_RFDR_NREGS; j++) {
3030*4882a593Smuzhiyun 		writel(j*2, ioaddr + RxFilterAddr);
3031*4882a593Smuzhiyun 		rbuf[i++] = readw(ioaddr + RxFilterData);
3032*4882a593Smuzhiyun 	}
3033*4882a593Smuzhiyun 	writel(rfcr, ioaddr + RxFilterAddr);
3034*4882a593Smuzhiyun 
3035*4882a593Smuzhiyun 	/* the interrupt status is clear-on-read - see if we missed any */
3036*4882a593Smuzhiyun 	if (rbuf[4] & rbuf[5]) {
3037*4882a593Smuzhiyun 		printk(KERN_WARNING
3038*4882a593Smuzhiyun 			"%s: shoot, we dropped an interrupt (%#08x)\n",
3039*4882a593Smuzhiyun 			dev->name, rbuf[4] & rbuf[5]);
3040*4882a593Smuzhiyun 	}
3041*4882a593Smuzhiyun 
3042*4882a593Smuzhiyun 	return 0;
3043*4882a593Smuzhiyun }
3044*4882a593Smuzhiyun 
3045*4882a593Smuzhiyun #define SWAP_BITS(x)	( (((x) & 0x0001) << 15) | (((x) & 0x0002) << 13) \
3046*4882a593Smuzhiyun 			| (((x) & 0x0004) << 11) | (((x) & 0x0008) << 9)  \
3047*4882a593Smuzhiyun 			| (((x) & 0x0010) << 7)  | (((x) & 0x0020) << 5)  \
3048*4882a593Smuzhiyun 			| (((x) & 0x0040) << 3)  | (((x) & 0x0080) << 1)  \
3049*4882a593Smuzhiyun 			| (((x) & 0x0100) >> 1)  | (((x) & 0x0200) >> 3)  \
3050*4882a593Smuzhiyun 			| (((x) & 0x0400) >> 5)  | (((x) & 0x0800) >> 7)  \
3051*4882a593Smuzhiyun 			| (((x) & 0x1000) >> 9)  | (((x) & 0x2000) >> 11) \
3052*4882a593Smuzhiyun 			| (((x) & 0x4000) >> 13) | (((x) & 0x8000) >> 15) )
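
/* SWAP_BITS() reverses the bit order of a 16-bit value (bit n moves to
 * bit 15-n).  Worked example: SWAP_BITS(0x1234) == 0x2c48, since bits
 * 2, 4, 5, 9 and 12 map to bits 13, 11, 10, 6 and 3. */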
3053*4882a593Smuzhiyun 
3054*4882a593Smuzhiyun static int netdev_get_eeprom(struct net_device *dev, u8 *buf)
3055*4882a593Smuzhiyun {
3056*4882a593Smuzhiyun 	int i;
3057*4882a593Smuzhiyun 	u16 *ebuf = (u16 *)buf;
3058*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
3059*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
3060*4882a593Smuzhiyun 
3061*4882a593Smuzhiyun 	/* eeprom_read reads 16 bits, and indexes by 16 bits */
3062*4882a593Smuzhiyun 	for (i = 0; i < np->eeprom_size/2; i++) {
3063*4882a593Smuzhiyun 		ebuf[i] = eeprom_read(ioaddr, i);
3064*4882a593Smuzhiyun 		/* The EEPROM itself stores data bit-swapped, but eeprom_read
3065*4882a593Smuzhiyun 		 * reads it back "sanely". So we swap it back here in order to
3066*4882a593Smuzhiyun 		 * present it to userland as it is stored. */
3067*4882a593Smuzhiyun 		ebuf[i] = SWAP_BITS(ebuf[i]);
3068*4882a593Smuzhiyun 	}
3069*4882a593Smuzhiyun 	return 0;
3070*4882a593Smuzhiyun }
3071*4882a593Smuzhiyun 
3072*4882a593Smuzhiyun static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
3073*4882a593Smuzhiyun {
3074*4882a593Smuzhiyun 	struct mii_ioctl_data *data = if_mii(rq);
3075*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
3076*4882a593Smuzhiyun 
3077*4882a593Smuzhiyun 	switch(cmd) {
3078*4882a593Smuzhiyun 	case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
3079*4882a593Smuzhiyun 		data->phy_id = np->phy_addr_external;
3080*4882a593Smuzhiyun 		fallthrough;
3081*4882a593Smuzhiyun 
3082*4882a593Smuzhiyun 	case SIOCGMIIREG:		/* Read MII PHY register. */
3083*4882a593Smuzhiyun 		/* The phy_id is not enough to uniquely identify
3084*4882a593Smuzhiyun 		 * the intended target. Therefore the command is sent to
3085*4882a593Smuzhiyun 		 * the given mii on the current port.
3086*4882a593Smuzhiyun 		 */
3087*4882a593Smuzhiyun 		if (dev->if_port == PORT_TP) {
3088*4882a593Smuzhiyun 			if ((data->phy_id & 0x1f) == np->phy_addr_external)
3089*4882a593Smuzhiyun 				data->val_out = mdio_read(dev,
3090*4882a593Smuzhiyun 							data->reg_num & 0x1f);
3091*4882a593Smuzhiyun 			else
3092*4882a593Smuzhiyun 				data->val_out = 0;
3093*4882a593Smuzhiyun 		} else {
3094*4882a593Smuzhiyun 			move_int_phy(dev, data->phy_id & 0x1f);
3095*4882a593Smuzhiyun 			data->val_out = miiport_read(dev, data->phy_id & 0x1f,
3096*4882a593Smuzhiyun 							data->reg_num & 0x1f);
3097*4882a593Smuzhiyun 		}
3098*4882a593Smuzhiyun 		return 0;
3099*4882a593Smuzhiyun 
3100*4882a593Smuzhiyun 	case SIOCSMIIREG:		/* Write MII PHY register. */
3101*4882a593Smuzhiyun 		if (dev->if_port == PORT_TP) {
3102*4882a593Smuzhiyun 			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
3103*4882a593Smuzhiyun 				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
3104*4882a593Smuzhiyun 					np->advertising = data->val_in;
3105*4882a593Smuzhiyun 				mdio_write(dev, data->reg_num & 0x1f,
3106*4882a593Smuzhiyun 							data->val_in);
3107*4882a593Smuzhiyun 			}
3108*4882a593Smuzhiyun 		} else {
3109*4882a593Smuzhiyun 			if ((data->phy_id & 0x1f) == np->phy_addr_external) {
3110*4882a593Smuzhiyun 				if ((data->reg_num & 0x1f) == MII_ADVERTISE)
3111*4882a593Smuzhiyun 					np->advertising = data->val_in;
3112*4882a593Smuzhiyun 			}
3113*4882a593Smuzhiyun 			move_int_phy(dev, data->phy_id & 0x1f);
3114*4882a593Smuzhiyun 			miiport_write(dev, data->phy_id & 0x1f,
3115*4882a593Smuzhiyun 						data->reg_num & 0x1f,
3116*4882a593Smuzhiyun 						data->val_in);
3117*4882a593Smuzhiyun 		}
3118*4882a593Smuzhiyun 		return 0;
3119*4882a593Smuzhiyun 	default:
3120*4882a593Smuzhiyun 		return -EOPNOTSUPP;
3121*4882a593Smuzhiyun 	}
3122*4882a593Smuzhiyun }
3123*4882a593Smuzhiyun 
3124*4882a593Smuzhiyun static void enable_wol_mode(struct net_device *dev, int enable_intr)
3125*4882a593Smuzhiyun {
3126*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
3127*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
3128*4882a593Smuzhiyun 
3129*4882a593Smuzhiyun 	if (netif_msg_wol(np))
3130*4882a593Smuzhiyun 		printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
3131*4882a593Smuzhiyun 			dev->name);
3132*4882a593Smuzhiyun 
3133*4882a593Smuzhiyun 	/* For WOL we must restart the rx process in silent mode.
3134*4882a593Smuzhiyun 	 * Write NULL to the RxRingPtr; this is only possible while
3135*4882a593Smuzhiyun 	 * the rx process is stopped.
3136*4882a593Smuzhiyun 	 */
3137*4882a593Smuzhiyun 	writel(0, ioaddr + RxRingPtr);
3138*4882a593Smuzhiyun 
3139*4882a593Smuzhiyun 	/* read WoL status to clear */
3140*4882a593Smuzhiyun 	readl(ioaddr + WOLCmd);
3141*4882a593Smuzhiyun 
3142*4882a593Smuzhiyun 	/* PME on, clear status */
3143*4882a593Smuzhiyun 	writel(np->SavedClkRun | PMEEnable | PMEStatus, ioaddr + ClkRun);
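	/* PMEStatus is presumably write-one-to-clear, like the standard
	 * PCI PME_Status bit, so this single write both enables PME and
	 * discards any stale wake event. */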
3144*4882a593Smuzhiyun 
3145*4882a593Smuzhiyun 	/* and restart the rx process */
3146*4882a593Smuzhiyun 	writel(RxOn, ioaddr + ChipCmd);
3147*4882a593Smuzhiyun 
3148*4882a593Smuzhiyun 	if (enable_intr) {
3149*4882a593Smuzhiyun 		/* enable the WOL interrupt.
3150*4882a593Smuzhiyun 		 * Could be used to send a netlink message.
3151*4882a593Smuzhiyun 		 */
3152*4882a593Smuzhiyun 		writel(WOLPkt | LinkChange, ioaddr + IntrMask);
3153*4882a593Smuzhiyun 		natsemi_irq_enable(dev);
3154*4882a593Smuzhiyun 	}
3155*4882a593Smuzhiyun }
3156*4882a593Smuzhiyun 
3157*4882a593Smuzhiyun static int netdev_close(struct net_device *dev)
3158*4882a593Smuzhiyun {
3159*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
3160*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
3161*4882a593Smuzhiyun 	const int irq = np->pci_dev->irq;
3162*4882a593Smuzhiyun 
3163*4882a593Smuzhiyun 	if (netif_msg_ifdown(np))
3164*4882a593Smuzhiyun 		printk(KERN_DEBUG
3165*4882a593Smuzhiyun 			"%s: Shutting down ethercard, status was %#04x.\n",
3166*4882a593Smuzhiyun 			dev->name, (int)readl(ioaddr + ChipCmd));
3167*4882a593Smuzhiyun 	if (netif_msg_pktdata(np))
3168*4882a593Smuzhiyun 		printk(KERN_DEBUG
3169*4882a593Smuzhiyun 			"%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
3170*4882a593Smuzhiyun 			dev->name, np->cur_tx, np->dirty_tx,
3171*4882a593Smuzhiyun 			np->cur_rx, np->dirty_rx);
3172*4882a593Smuzhiyun 
3173*4882a593Smuzhiyun 	napi_disable(&np->napi);
3174*4882a593Smuzhiyun 
3175*4882a593Smuzhiyun 	/*
3176*4882a593Smuzhiyun 	 * FIXME: what if someone tries to close a device
3177*4882a593Smuzhiyun 	 * that is suspended?
3178*4882a593Smuzhiyun 	 * Should we reenable the nic to switch to
3179*4882a593Smuzhiyun 	 * the final WOL settings?
3180*4882a593Smuzhiyun 	 */
3181*4882a593Smuzhiyun 
3182*4882a593Smuzhiyun 	del_timer_sync(&np->timer);
3183*4882a593Smuzhiyun 	disable_irq(irq);
3184*4882a593Smuzhiyun 	spin_lock_irq(&np->lock);
3185*4882a593Smuzhiyun 	natsemi_irq_disable(dev);
3186*4882a593Smuzhiyun 	np->hands_off = 1;
3187*4882a593Smuzhiyun 	spin_unlock_irq(&np->lock);
3188*4882a593Smuzhiyun 	enable_irq(irq);
3189*4882a593Smuzhiyun 
3190*4882a593Smuzhiyun 	free_irq(irq, dev);
3191*4882a593Smuzhiyun 
3192*4882a593Smuzhiyun 	/* Interrupt disabled, interrupt handler released,
3193*4882a593Smuzhiyun 	 * queue stopped, timer deleted, rtnl_lock held
3194*4882a593Smuzhiyun 	 * All async codepaths that access the driver are disabled.
3195*4882a593Smuzhiyun 	 */
3196*4882a593Smuzhiyun 	spin_lock_irq(&np->lock);
3197*4882a593Smuzhiyun 	np->hands_off = 0;
3198*4882a593Smuzhiyun 	readl(ioaddr + IntrMask);
3199*4882a593Smuzhiyun 	readw(ioaddr + MIntrStatus);
3200*4882a593Smuzhiyun 
3201*4882a593Smuzhiyun 	/* Freeze Stats */
3202*4882a593Smuzhiyun 	writel(StatsFreeze, ioaddr + StatsCtrl);
3203*4882a593Smuzhiyun 
3204*4882a593Smuzhiyun 	/* Stop the chip's Tx and Rx processes. */
3205*4882a593Smuzhiyun 	natsemi_stop_rxtx(dev);
3206*4882a593Smuzhiyun 
3207*4882a593Smuzhiyun 	__get_stats(dev);
3208*4882a593Smuzhiyun 	spin_unlock_irq(&np->lock);
3209*4882a593Smuzhiyun 
3210*4882a593Smuzhiyun 	/* clear the carrier last - an interrupt could reenable it otherwise */
3211*4882a593Smuzhiyun 	netif_carrier_off(dev);
3212*4882a593Smuzhiyun 	netif_stop_queue(dev);
3213*4882a593Smuzhiyun 
3214*4882a593Smuzhiyun 	dump_ring(dev);
3215*4882a593Smuzhiyun 	drain_ring(dev);
3216*4882a593Smuzhiyun 	free_ring(dev);
3217*4882a593Smuzhiyun 
3218*4882a593Smuzhiyun 	{
3219*4882a593Smuzhiyun 		u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
3220*4882a593Smuzhiyun 		if (wol) {
3221*4882a593Smuzhiyun 			/* restart the NIC in WOL mode.
3222*4882a593Smuzhiyun 			 * The nic must be stopped for this.
3223*4882a593Smuzhiyun 			 */
3224*4882a593Smuzhiyun 			enable_wol_mode(dev, 0);
3225*4882a593Smuzhiyun 		} else {
3226*4882a593Smuzhiyun 			/* Restore PME enable bit unmolested */
3227*4882a593Smuzhiyun 			writel(np->SavedClkRun, ioaddr + ClkRun);
3228*4882a593Smuzhiyun 		}
3229*4882a593Smuzhiyun 	}
3230*4882a593Smuzhiyun 	return 0;
3231*4882a593Smuzhiyun }
3232*4882a593Smuzhiyun 
3233*4882a593Smuzhiyun 
3234*4882a593Smuzhiyun static void natsemi_remove1(struct pci_dev *pdev)
3235*4882a593Smuzhiyun {
3236*4882a593Smuzhiyun 	struct net_device *dev = pci_get_drvdata(pdev);
3237*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
3238*4882a593Smuzhiyun 
3239*4882a593Smuzhiyun 	NATSEMI_REMOVE_FILE(pdev, dspcfg_workaround);
3240*4882a593Smuzhiyun 	unregister_netdev (dev);
3241*4882a593Smuzhiyun 	iounmap(ioaddr);
3242*4882a593Smuzhiyun 	free_netdev (dev);
3243*4882a593Smuzhiyun }
3244*4882a593Smuzhiyun 
3245*4882a593Smuzhiyun /*
3246*4882a593Smuzhiyun  * The ns83815 chip doesn't have explicit RxStop bits.
3247*4882a593Smuzhiyun  * Kicking the Rx or Tx process for a new packet reenables the Rx process
3248*4882a593Smuzhiyun  * of the nic, thus this function must be very careful:
3249*4882a593Smuzhiyun  *
3250*4882a593Smuzhiyun  * suspend/resume synchronization:
3251*4882a593Smuzhiyun  * entry points:
3252*4882a593Smuzhiyun  *   netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
3253*4882a593Smuzhiyun  *   start_tx, ns_tx_timeout
3254*4882a593Smuzhiyun  *
3255*4882a593Smuzhiyun  * No function accesses the hardware without checking np->hands_off.
3256*4882a593Smuzhiyun  *	the check occurs under spin_lock_irq(&np->lock);
3257*4882a593Smuzhiyun  * exceptions:
3258*4882a593Smuzhiyun  *	* netdev_ioctl: noncritical access.
3259*4882a593Smuzhiyun  *	* netdev_open: cannot happen due to the device_detach
3260*4882a593Smuzhiyun  *	* netdev_close: doesn't hurt.
3261*4882a593Smuzhiyun  *	* netdev_timer: timer stopped by natsemi_suspend.
3262*4882a593Smuzhiyun  *	* intr_handler: doesn't acquire the spinlock; suspend calls
3263*4882a593Smuzhiyun  *		disable_irq() to enforce synchronization.
3264*4882a593Smuzhiyun  *	* natsemi_poll: checks before reenabling interrupts; suspend
3265*4882a593Smuzhiyun  *		sets hands_off, disables interrupts and then waits with
3266*4882a593Smuzhiyun  *		napi_disable().
3267*4882a593Smuzhiyun  *
3268*4882a593Smuzhiyun  * Interrupts must be disabled, otherwise hands_off can cause irq storms.
3269*4882a593Smuzhiyun  */
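
/*
 * A minimal sketch (hypothetical helper, not part of this driver) of
 * the hands_off guard pattern described above: a process-context
 * entry point takes np->lock and backs off while suspend owns the
 * hardware.  All names other than example_guarded_hw_read() are the
 * ones already used in this file.
 */
static inline u32 example_guarded_hw_read(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = ns_ioaddr(dev);
	u32 val = 0;

	spin_lock_irq(&np->lock);
	if (!np->hands_off)	/* suspend is not in progress */
		val = readl(ioaddr + IntrMask);
	spin_unlock_irq(&np->lock);

	return val;
}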
3270*4882a593Smuzhiyun 
3271*4882a593Smuzhiyun static int __maybe_unused natsemi_suspend(struct device *dev_d)
3272*4882a593Smuzhiyun {
3273*4882a593Smuzhiyun 	struct net_device *dev = dev_get_drvdata(dev_d);
3274*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
3275*4882a593Smuzhiyun 	void __iomem * ioaddr = ns_ioaddr(dev);
3276*4882a593Smuzhiyun 
3277*4882a593Smuzhiyun 	rtnl_lock();
3278*4882a593Smuzhiyun 	if (netif_running (dev)) {
3279*4882a593Smuzhiyun 		const int irq = np->pci_dev->irq;
3280*4882a593Smuzhiyun 
3281*4882a593Smuzhiyun 		del_timer_sync(&np->timer);
3282*4882a593Smuzhiyun 
3283*4882a593Smuzhiyun 		disable_irq(irq);
3284*4882a593Smuzhiyun 		spin_lock_irq(&np->lock);
3285*4882a593Smuzhiyun 
3286*4882a593Smuzhiyun 		natsemi_irq_disable(dev);
3287*4882a593Smuzhiyun 		np->hands_off = 1;
3288*4882a593Smuzhiyun 		natsemi_stop_rxtx(dev);
3289*4882a593Smuzhiyun 		netif_stop_queue(dev);
3290*4882a593Smuzhiyun 
3291*4882a593Smuzhiyun 		spin_unlock_irq(&np->lock);
3292*4882a593Smuzhiyun 		enable_irq(irq);
3293*4882a593Smuzhiyun 
3294*4882a593Smuzhiyun 		napi_disable(&np->napi);
3295*4882a593Smuzhiyun 
3296*4882a593Smuzhiyun 		/* Update the error counts. */
3297*4882a593Smuzhiyun 		__get_stats(dev);
3298*4882a593Smuzhiyun 
3299*4882a593Smuzhiyun 		/* pci_power_off(pdev, -1); */
3300*4882a593Smuzhiyun 		drain_ring(dev);
3301*4882a593Smuzhiyun 		{
3302*4882a593Smuzhiyun 			u32 wol = readl(ioaddr + WOLCmd) & WakeOptsSummary;
3303*4882a593Smuzhiyun 			/* Either re-arm WOL or restore the PME enable bit */
3304*4882a593Smuzhiyun 			if (wol) {
3305*4882a593Smuzhiyun 				/* Restart the NIC in WOL mode.
3306*4882a593Smuzhiyun 				 * The NIC must be stopped for this.
3307*4882a593Smuzhiyun 				 * FIXME: use the WOL interrupt
3308*4882a593Smuzhiyun 				 */
3309*4882a593Smuzhiyun 				enable_wol_mode(dev, 0);
3310*4882a593Smuzhiyun 			} else {
3311*4882a593Smuzhiyun 				/* Restore PME enable bit unmolested */
3312*4882a593Smuzhiyun 				writel(np->SavedClkRun, ioaddr + ClkRun);
3313*4882a593Smuzhiyun 			}
3314*4882a593Smuzhiyun 		}
3315*4882a593Smuzhiyun 	}
3316*4882a593Smuzhiyun 	netif_device_detach(dev);
3317*4882a593Smuzhiyun 	rtnl_unlock();
3318*4882a593Smuzhiyun 	return 0;
3319*4882a593Smuzhiyun }
3320*4882a593Smuzhiyun 
3321*4882a593Smuzhiyun 
3322*4882a593Smuzhiyun static int __maybe_unused natsemi_resume(struct device *dev_d)
3323*4882a593Smuzhiyun {
3324*4882a593Smuzhiyun 	struct net_device *dev = dev_get_drvdata(dev_d);
3325*4882a593Smuzhiyun 	struct netdev_private *np = netdev_priv(dev);
3326*4882a593Smuzhiyun 
3327*4882a593Smuzhiyun 	rtnl_lock();
3328*4882a593Smuzhiyun 	if (netif_device_present(dev))
3329*4882a593Smuzhiyun 		goto out;
3330*4882a593Smuzhiyun 	if (netif_running(dev)) {
3331*4882a593Smuzhiyun 		const int irq = np->pci_dev->irq;
3332*4882a593Smuzhiyun 
3333*4882a593Smuzhiyun 		BUG_ON(!np->hands_off);
3334*4882a593Smuzhiyun 	/*	pci_power_on(pdev); */
3335*4882a593Smuzhiyun 
3336*4882a593Smuzhiyun 		napi_enable(&np->napi);
3337*4882a593Smuzhiyun 
3338*4882a593Smuzhiyun 		natsemi_reset(dev);
3339*4882a593Smuzhiyun 		init_ring(dev);
3340*4882a593Smuzhiyun 		disable_irq(irq);
3341*4882a593Smuzhiyun 		spin_lock_irq(&np->lock);
3342*4882a593Smuzhiyun 		np->hands_off = 0;
3343*4882a593Smuzhiyun 		init_registers(dev);
3344*4882a593Smuzhiyun 		netif_device_attach(dev);
3345*4882a593Smuzhiyun 		spin_unlock_irq(&np->lock);
3346*4882a593Smuzhiyun 		enable_irq(irq);
3347*4882a593Smuzhiyun 
3348*4882a593Smuzhiyun 		mod_timer(&np->timer, round_jiffies(jiffies + 1*HZ));
3349*4882a593Smuzhiyun 	}
3350*4882a593Smuzhiyun 	netif_device_attach(dev);
3351*4882a593Smuzhiyun out:
3352*4882a593Smuzhiyun 	rtnl_unlock();
3353*4882a593Smuzhiyun 	return 0;
3354*4882a593Smuzhiyun }
3355*4882a593Smuzhiyun 
3356*4882a593Smuzhiyun static SIMPLE_DEV_PM_OPS(natsemi_pm_ops, natsemi_suspend, natsemi_resume);
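
/*
 * For reference: a simplified sketch of what SIMPLE_DEV_PM_OPS()
 * above roughly expands to when CONFIG_PM_SLEEP is set (see
 * <linux/pm.h>).  All system-sleep transitions funnel into the two
 * callbacks defined in this file; the _sketch name is hypothetical.
 */
#if 0
static const struct dev_pm_ops natsemi_pm_ops_sketch = {
	.suspend  = natsemi_suspend,	/* system suspend */
	.resume   = natsemi_resume,	/* system resume */
	.freeze   = natsemi_suspend,	/* hibernation image creation */
	.thaw     = natsemi_resume,
	.poweroff = natsemi_suspend,	/* entering hibernation */
	.restore  = natsemi_resume,	/* return from hibernation */
};
#endif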
3357*4882a593Smuzhiyun 
3358*4882a593Smuzhiyun static struct pci_driver natsemi_driver = {
3359*4882a593Smuzhiyun 	.name		= DRV_NAME,
3360*4882a593Smuzhiyun 	.id_table	= natsemi_pci_tbl,
3361*4882a593Smuzhiyun 	.probe		= natsemi_probe1,
3362*4882a593Smuzhiyun 	.remove		= natsemi_remove1,
3363*4882a593Smuzhiyun 	.driver.pm	= &natsemi_pm_ops,
3364*4882a593Smuzhiyun };
3365*4882a593Smuzhiyun 
3366*4882a593Smuzhiyun static int __init natsemi_init_mod (void)
3367*4882a593Smuzhiyun {
3368*4882a593Smuzhiyun /* When built as a module, this is printed whether or not any devices are found in probe. */
3369*4882a593Smuzhiyun #ifdef MODULE
3370*4882a593Smuzhiyun 	printk(version);
3371*4882a593Smuzhiyun #endif
3372*4882a593Smuzhiyun 
3373*4882a593Smuzhiyun 	return pci_register_driver(&natsemi_driver);
3374*4882a593Smuzhiyun }
3375*4882a593Smuzhiyun 
3376*4882a593Smuzhiyun static void __exit natsemi_exit_mod (void)
3377*4882a593Smuzhiyun {
3378*4882a593Smuzhiyun 	pci_unregister_driver (&natsemi_driver);
3379*4882a593Smuzhiyun }
3380*4882a593Smuzhiyun 
3381*4882a593Smuzhiyun module_init(natsemi_init_mod);
3382*4882a593Smuzhiyun module_exit(natsemi_exit_mod);
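
/*
 * Usage note (illustrative): once built as natsemi.ko, loading the
 * module registers the PCI driver and the PCI core probes any
 * matching DP8381x devices; unloading runs natsemi_remove1() for
 * each bound device before the driver is unregistered, e.g.:
 *
 *	# modprobe natsemi
 *	# rmmod natsemi
 */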
3383*4882a593Smuzhiyun 
3384